dimensions differ from the
// dimensions specified in the size() call in the sketch, for
// 3D sketches, browsers will either not render or render the
// scene incorrectly. To fix this, we need to adjust the
// width and height attributes of the canvas.
curElement.width = p.width = aWidth || 100;
curElement.height = p.height = aHeight || 100;
curContext = getGLContext(curElement);
canTex = curContext.createTexture(); // texture
textTex = curContext.createTexture(); // texture
} catch(e_size) {
Processing.debug(e_size);
}
if (!curContext) {
throw "WebGL context is not supported on this browser.";
}
// Set defaults
curContext.viewport(0, 0, curElement.width, curElement.height);
curContext.enable(curContext.DEPTH_TEST);
curContext.enable(curContext.BLEND);
curContext.blendFunc(curContext.SRC_ALPHA, curContext.ONE_MINUS_SRC_ALPHA);
// Create the program objects to render 2D (points, lines) and
// 3D (spheres, boxes) shapes. Because 2D shapes are not lit,
// lighting calculations could be omitted from that program object.
programObject2D = createProgramObject(curContext, vertexShaderSource2D, fragmentShaderSource2D);
programObjectUnlitShape = createProgramObject(curContext, vShaderSrcUnlitShape, fShaderSrcUnlitShape);
// Set the default point and line width for the 2D and unlit shapes.
p.strokeWeight(1.0);
// Now that the programs have been compiled, we can set the default
// states for the lights.
programObject3D = createProgramObject(curContext, vertexShaderSource3D, fragmentShaderSource3D);
curContext.useProgram(programObject3D);
// assume we aren't using textures by default
uniformi("usingTexture3d", programObject3D, "usingTexture", usingTexture);
// assume that we arn't tinting by default
p.lightFalloff(1, 0, 0);
p.shininess(1);
p.ambient(255, 255, 255);
p.specular(0, 0, 0);
p.emissive(0, 0, 0);
// Create buffers for 3D primitives
boxBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, boxBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, boxVerts, curContext.STATIC_DRAW);
boxNormBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, boxNormBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, boxNorms, curContext.STATIC_DRAW);
boxOutlineBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, boxOutlineBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, boxOutlineVerts, curContext.STATIC_DRAW);
// used to draw the rectangle and the outline
rectBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, rectBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, rectVerts, curContext.STATIC_DRAW);
rectNormBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, rectNormBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, rectNorms, curContext.STATIC_DRAW);
// The sphere vertices are specified dynamically since the user
// can change the level of detail. Every time the user does that
// using sphereDetail(), the new vertices are calculated.
sphereBuffer = curContext.createBuffer();
lineBuffer = curContext.createBuffer();
// Shape buffers
fillBuffer = curContext.createBuffer();
fillColorBuffer = curContext.createBuffer();
strokeColorBuffer = curContext.createBuffer();
shapeTexVBO = curContext.createBuffer();
pointBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, pointBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array([0, 0, 0]), curContext.STATIC_DRAW);
textBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, textBuffer );
curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array([1,1,0,-1,1,0,-1,-1,0,1,-1,0]), curContext.STATIC_DRAW);
textureBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ARRAY_BUFFER, textureBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array([0,0,1,0,1,1,0,1]), curContext.STATIC_DRAW);
indexBuffer = curContext.createBuffer();
curContext.bindBuffer(curContext.ELEMENT_ARRAY_BUFFER, indexBuffer);
curContext.bufferData(curContext.ELEMENT_ARRAY_BUFFER, new Uint16Array([0,1,2,2,3,0]), curContext.STATIC_DRAW);
cam = new PMatrix3D();
cameraInv = new PMatrix3D();
modelView = new PMatrix3D();
modelViewInv = new PMatrix3D();
projection = new PMatrix3D();
p.camera();
p.perspective();
userMatrixStack = new PMatrixStack();
userReverseMatrixStack = new PMatrixStack();
// used by both curve and bezier, so just init here
curveBasisMatrix = new PMatrix3D();
curveToBezierMatrix = new PMatrix3D();
curveDrawMatrix = new PMatrix3D();
bezierDrawMatrix = new PMatrix3D();
bezierBasisInverse = new PMatrix3D();
bezierBasisMatrix = new PMatrix3D();
bezierBasisMatrix.set(-1, 3, -3, 1, 3, -6, 3, 0, -3, 3, 0, 0, 1, 0, 0, 0);
DrawingShared.prototype.size.apply(this, arguments);
};
}());
////////////////////////////////////////////////////////////////////////////
// Lights
////////////////////////////////////////////////////////////////////////////
/**
* Adds an ambient light. Ambient light doesn't come from a specific direction,
* the rays of light have bounced around so much that objects are evenly lit
* from all sides. Ambient lights are almost always used in combination with
* other types of lights. Lights need to be included in the draw() to
* remain persistent in a looping program. Placing them in the setup()
* of a looping program will cause them to only have an effect the first time
* through the loop. The effect of the parameters is determined by the current
* color mode.
*
* @param {int | float} r red or hue value
* @param {int | float} g green or hue value
* @param {int | float} b blue or hue value
*
* @param {int | float} x x position of light (used for falloff)
* @param {int | float} y y position of light (used for falloff)
* @param {int | float} z z position of light (used for falloff)
*
* @returns none
*
* @see lights
* @see directionalLight
* @see pointLight
* @see spotLight
*/
// ambientLight() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.ambientLight = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Registers an ambient light (shader light type 0) in the next free
 * light slot and bumps the shader's active-light counter.
 */
Drawing3D.prototype.ambientLight = function(r, g, b, x, y, z) {
  if (lightCount === PConstants.MAX_LIGHTS) {
    throw "can only create " + PConstants.MAX_LIGHTS + " lights";
  }

  // Transform the light position into eye space once here, rather than
  // per vertex in the shader. The Y flip matches Processing's
  // screen-space coordinate convention.
  var eyePos = new PVector(x, y, z);
  var eyeTransform = new PMatrix3D();
  eyeTransform.scale(1, -1, 1);
  eyeTransform.apply(modelView.array());
  eyeTransform.mult(eyePos, eyePos);

  // Pack the color with the raw helper instead of p.color (fewer
  // property lookups), then normalize each channel to [0, 1].
  var packed = color$4(r, g, b, 0);
  var rgb = [
    ((packed & PConstants.RED_MASK) >>> 16) / 255,
    ((packed & PConstants.GREEN_MASK) >>> 8) / 255,
    (packed & PConstants.BLUE_MASK) / 255
  ];

  // Slot-indexed uniforms; type 0 marks an ambient light in the shader.
  curContext.useProgram(programObject3D);
  uniformf("lights.color.3d." + lightCount, programObject3D, "lights" + lightCount + ".color", rgb);
  uniformf("lights.position.3d." + lightCount, programObject3D, "lights" + lightCount + ".position", eyePos.array());
  uniformi("lights.type.3d." + lightCount, programObject3D, "lights" + lightCount + ".type", 0);
  uniformi("lightCount3d", programObject3D, "lightCount", ++lightCount);
};
/**
* Adds a directional light. Directional light comes from one direction and
* is stronger when hitting a surface squarely and weaker if it hits at a
* gentle angle. After hitting a surface, a directional lights scatters in
* all directions. Lights need to be included in the draw() to remain
* persistent in a looping program. Placing them in the setup() of a
* looping program will cause them to only have an effect the first time
* through the loop. The effect of the r, g, and b
* parameters is determined by the current color mode. The nx,
* ny, and nz parameters specify the direction the light is
* facing. For example, setting ny to -1 will cause the geometry to be
* lit from below (the light is facing directly upward).
*
* @param {int | float} r red or hue value
* @param {int | float} g green or hue value
* @param {int | float} b blue or hue value
*
* @param {int | float} nx direction along the x axis
* @param {int | float} ny direction along the y axis
* @param {int | float} nz direction along the z axis
*
* @returns none
*
* @see lights
* @see ambientLight
* @see pointLight
* @see spotLight
*/
// directionalLight() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.directionalLight = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Registers a directional light (shader light type 1) in the next free
 * light slot. The direction is rotated into eye space before upload.
 */
Drawing3D.prototype.directionalLight = function(r, g, b, nx, ny, nz) {
  if (lightCount === PConstants.MAX_LIGHTS) {
    throw "can only create " + PConstants.MAX_LIGHTS + " lights";
  }
  curContext.useProgram(programObject3D);

  // Bring the direction into eye space. PMatrix3D.mult() treats a
  // missing w component as 1 (a point, not a vector), so the 3x3
  // rotation part of the matrix is applied by hand instead.
  var flipped = new PMatrix3D();
  flipped.scale(1, -1, 1); // Processing's Y axis points down
  flipped.apply(modelView.array());
  var m = flipped.array();
  var eyeDir = [
    m[0] * nx + m[4] * ny + m[8] * nz,
    m[1] * nx + m[5] * ny + m[9] * nz,
    m[2] * nx + m[6] * ny + m[10] * nz
  ];

  // Pack the color without going through p.color (fewer property
  // lookups), then normalize each channel to [0, 1].
  var packed = color$4(r, g, b, 0);
  var rgb = [
    ((packed & PConstants.RED_MASK) >>> 16) / 255,
    ((packed & PConstants.GREEN_MASK) >>> 8) / 255,
    (packed & PConstants.BLUE_MASK) / 255
  ];

  // Slot-indexed uniforms; the "position" slot carries the direction
  // for this light type, and type 1 marks it as directional.
  uniformf("lights.color.3d." + lightCount, programObject3D, "lights" + lightCount + ".color", rgb);
  uniformf("lights.position.3d." + lightCount, programObject3D, "lights" + lightCount + ".position", eyeDir);
  uniformi("lights.type.3d." + lightCount, programObject3D, "lights" + lightCount + ".type", 1);
  uniformi("lightCount3d", programObject3D, "lightCount", ++lightCount);
};
/**
* Sets the falloff rates for point lights, spot lights, and ambient lights.
* The parameters are used to determine the falloff with the following equation:
*
* d = distance from light position to vertex position
* falloff = 1 / (CONSTANT + d * LINEAR + (d*d) * QUADRATIC)
*
* Like fill(), it affects only the elements which are created after it in the
* code. The default value is lightFalloff(1.0, 0.0, 0.0). Thinking about an
* ambient light with a falloff can be tricky. It is used, for example, if you
* wanted a region of your scene to be lit ambiently one color and another region
* to be lit ambiently by another color, you would use an ambient light with location
* and falloff. You can think of it as a point light that doesn't care which direction
* a surface is facing.
*
* @param {int | float} constant constant value for determining falloff
* @param {int | float} linear linear value for determining falloff
* @param {int | float} quadratic quadratic value for determining falloff
*
* @returns none
*
* @see lights
* @see ambientLight
* @see pointLight
* @see spotLight
* @see lightSpecular
*/
// lightFalloff() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.lightFalloff = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Uploads the three light-falloff coefficients to the 3D program as a
 * single vec3 uniform (constant, linear, quadratic).
 */
Drawing3D.prototype.lightFalloff = function(constant, linear, quadratic) {
  var coefficients = [constant, linear, quadratic];
  curContext.useProgram(programObject3D);
  uniformf("falloff3d", programObject3D, "falloff", coefficients);
};
/**
* Sets the specular color for lights. Like fill(), it affects only the
* elements which are created after it in the code. Specular refers to light
* which bounces off a surface in a preferred direction (rather than bouncing
* in all directions like a diffuse light) and is used for creating highlights.
* The specular quality of a light interacts with the specular material qualities
* set through the specular() and shininess() functions.
*
* @param {int | float} r red or hue value
* @param {int | float} g green or hue value
* @param {int | float} b blue or hue value
*
* @returns none
*
* @see lights
* @see ambientLight
* @see pointLight
* @see spotLight
*/
// lightSpecular() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.lightSpecular = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Sets the specular color applied to subsequently created lights by
 * uploading it as a normalized vec3 uniform on the 3D program.
 */
Drawing3D.prototype.lightSpecular = function(r, g, b) {
  // Resolve the color with the raw helper instead of p.color to keep
  // property lookups down, then normalize each channel to [0, 1].
  var packed = color$4(r, g, b, 0);
  var rgb = [
    ((packed & PConstants.RED_MASK) >>> 16) / 255,
    ((packed & PConstants.GREEN_MASK) >>> 8) / 255,
    (packed & PConstants.BLUE_MASK) / 255
  ];
  curContext.useProgram(programObject3D);
  uniformf("specular3d", programObject3D, "specular", rgb);
};
/**
* Sets the default ambient light, directional light, falloff, and specular
* values. The defaults are ambientLight(128, 128, 128) and
* directionalLight(128, 128, 128, 0, 0, -1), lightFalloff(1, 0, 0), and
* lightSpecular(0, 0, 0). Lights need to be included in the draw() to remain
* persistent in a looping program. Placing them in the setup() of a looping
* program will cause them to only have an effect the first time through the
* loop.
*
* @returns none
*
* @see ambientLight
* @see directionalLight
* @see pointLight
* @see spotLight
* @see noLights
*
*/
p.lights = function() {
  // Processing's default lighting rig: mid-grey ambient plus a mid-grey
  // directional light shining into the screen (0, 0, -1), with the
  // default falloff and no specular component. Order matters: the two
  // light calls each consume a shader light slot.
  p.ambientLight(128, 128, 128);
  p.directionalLight(128, 128, 128, 0, 0, -1);
  p.lightFalloff(1, 0, 0);
  p.lightSpecular(0, 0, 0);
};
/**
* Adds a point light. Lights need to be included in the draw() to remain
* persistent in a looping program. Placing them in the setup() of a
* looping program will cause them to only have an effect the first time through
* the loop. The effect of the r, g, and b parameters
* is determined by the current color mode. The x, y, and z
* parameters set the position of the light.
*
* @param {int | float} r red or hue value
* @param {int | float} g green or hue value
* @param {int | float} b blue or hue value
* @param {int | float} x x coordinate of the light
* @param {int | float} y y coordinate of the light
* @param {int | float} z z coordinate of the light
*
* @returns none
*
* @see lights
* @see directionalLight
* @see ambientLight
* @see spotLight
*/
// pointLight() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.pointLight = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Registers a point light (shader light type 2) in the next free light
 * slot and bumps the shader's active-light counter.
 */
Drawing3D.prototype.pointLight = function(r, g, b, x, y, z) {
  if (lightCount === PConstants.MAX_LIGHTS) {
    throw "can only create " + PConstants.MAX_LIGHTS + " lights";
  }

  // Place the point in view space once here instead of once per vertex
  // in the shader. The Y flip matches Processing's coordinate system.
  var eyePos = new PVector(x, y, z);
  var eyeTransform = new PMatrix3D();
  eyeTransform.scale(1, -1, 1);
  eyeTransform.apply(modelView.array());
  eyeTransform.mult(eyePos, eyePos);

  // Pack the color without going through p.color (fewer property
  // lookups), then normalize each channel to [0, 1].
  var packed = color$4(r, g, b, 0);
  var rgb = [
    ((packed & PConstants.RED_MASK) >>> 16) / 255,
    ((packed & PConstants.GREEN_MASK) >>> 8) / 255,
    (packed & PConstants.BLUE_MASK) / 255
  ];

  // Slot-indexed uniforms; type 2 marks a point light in the shader.
  curContext.useProgram(programObject3D);
  uniformf("lights.color.3d." + lightCount, programObject3D, "lights" + lightCount + ".color", rgb);
  uniformf("lights.position.3d." + lightCount, programObject3D, "lights" + lightCount + ".position", eyePos.array());
  uniformi("lights.type.3d." + lightCount, programObject3D, "lights" + lightCount + ".type", 2);
  uniformi("lightCount3d", programObject3D, "lightCount", ++lightCount);
};
/**
* Disable all lighting. Lighting is turned off by default and enabled with
* the lights() method. This function can be used to disable lighting so
* that 2D geometry (which does not require lighting) can be drawn after a
* set of lighted 3D geometry.
*
* @returns none
*
* @see lights
*/
// noLights() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.noLights = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Disables lighting by resetting the light counter, both here (so new
 * lights fill slots from index 0) and in the shader (so geometry drawn
 * from now on is rendered unlit).
 */
Drawing3D.prototype.noLights = function() {
  lightCount = 0;
  curContext.useProgram(programObject3D);
  uniformi("lightCount3d", programObject3D, "lightCount", 0);
};
/**
* Adds a spot light. Lights need to be included in the draw() to
* remain persistent in a looping program. Placing them in the setup()
* of a looping program will cause them to only have an effect the first time
* through the loop. The effect of the r, g, and b parameters
* is determined by the current color mode. The x, y, and z
* parameters specify the position of the light and nx, ny, nz
* specify the direction of the light. The angle parameter affects the angle of the
* spotlight cone.
*
* @param {int | float} r red or hue value
* @param {int | float} g green or hue value
* @param {int | float} b blue or hue value
* @param {int | float} x coordinate of the light
* @param {int | float} y coordinate of the light
* @param {int | float} z coordinate of the light
* @param {int | float} nx direction along the x axis
* @param {int | float} ny direction along the y axis
* @param {int | float} nz direction along the z axis
* @param {float} angle angle of the spotlight cone
* @param {float} concentration exponent determining the center bias of the cone
*
* @returns none
*
* @see lights
* @see directionalLight
* @see ambientLight
* @see pointLight
*/
// spotLight() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.spotLight = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Registers a spot light (shader light type 3): eye-space position and
 * direction plus cone angle and concentration, in the next free slot.
 */
Drawing3D.prototype.spotLight = function(r, g, b, x, y, z, nx, ny, nz, angle, concentration) {
  // The shader supports only a fixed number of light slots.
  if (lightCount === PConstants.MAX_LIGHTS) {
    throw "can only create " + PConstants.MAX_LIGHTS + " lights";
  }
  curContext.useProgram(programObject3D);
  // multiply the position and direction by the model view matrix
  // once per object rather than once per vertex.
  var pos = new PVector(x, y, z);
  var mvm = new PMatrix3D();
  mvm.scale(1, -1, 1); // flip Y to match Processing's screen coordinates
  mvm.apply(modelView.array());
  mvm.mult(pos, pos);
  // Convert to array since we need to directly access the elements.
  mvm = mvm.array();
  // We need to multiply the direction by the model view matrix, but
  // the mult function checks the w component of the vector, if it isn't
  // present, it uses 1, so we apply the 3x3 rotation part by hand
  // (a direction must not pick up the matrix's translation column).
  var dir = [
    mvm[0] * nx + mvm[4] * ny + mvm[8] * nz,
    mvm[1] * nx + mvm[5] * ny + mvm[9] * nz,
    mvm[2] * nx + mvm[6] * ny + mvm[10] * nz
  ];
  // Instead of calling p.color, we do the calculations ourselves to
  // reduce property lookups.
  var col = color$4(r, g, b, 0);
  var normalizedCol = [ ((col & PConstants.RED_MASK) >>> 16) / 255,
                        ((col & PConstants.GREEN_MASK) >>> 8) / 255,
                        (col & PConstants.BLUE_MASK) / 255 ];
  // Slot-indexed uniforms; type 3 marks a spot light in the shader.
  uniformf("lights.color.3d." + lightCount, programObject3D, "lights" + lightCount + ".color", normalizedCol);
  uniformf("lights.position.3d." + lightCount, programObject3D, "lights" + lightCount + ".position", pos.array());
  uniformf("lights.direction.3d." + lightCount, programObject3D, "lights" + lightCount + ".direction", dir);
  uniformf("lights.concentration.3d." + lightCount, programObject3D, "lights" + lightCount + ".concentration", concentration);
  uniformf("lights.angle.3d." + lightCount, programObject3D, "lights" + lightCount + ".angle", angle);
  uniformi("lights.type.3d." + lightCount, programObject3D, "lights" + lightCount + ".type", 3);
  uniformi("lightCount3d", programObject3D, "lightCount", ++lightCount);
};
////////////////////////////////////////////////////////////////////////////
// Camera functions
////////////////////////////////////////////////////////////////////////////
/**
* The beginCamera() and endCamera() functions enable advanced customization of the camera space.
* The functions are useful if you want to more control over camera movement, however for most users, the camera()
* function will be sufficient.
The camera functions will replace any transformations (such as rotate()
* or translate()) that occur before them in draw(), but they will not automatically replace the camera
* transform itself. For this reason, camera functions should be placed at the beginning of draw() (so that
* transformations happen afterwards), and the camera() function can be used after beginCamera() if
* you want to reset the camera before applying transformations.
This function sets the matrix mode to the
* camera matrix so calls such as translate(), rotate(), applyMatrix() and resetMatrix() affect the camera.
* beginCamera() should always be used with a following endCamera() and pairs of beginCamera() and
* endCamera() cannot be nested.
*
* @see camera
* @see endCamera
* @see applyMatrix
* @see resetMatrix
* @see translate
* @see rotate
* @see scale
*/
Drawing2D.prototype.beginCamera = function() {
  // Camera manipulation requires a 3D rendering context.
  throw ("beginCamera() is not available in 2D mode");
};
Drawing3D.prototype.beginCamera = function() {
  // beginCamera()/endCamera() pairs cannot be nested.
  if (manipulatingCamera) {
    throw ("You cannot call beginCamera() again before calling endCamera()");
  }
  manipulatingCamera = true;
  // Point the model-view references at the camera matrices so that
  // subsequent transform calls (translate/rotate/applyMatrix/...) edit
  // the camera itself until endCamera() restores them.
  modelView = cameraInv;
  modelViewInv = cam;
};
/**
* The beginCamera() and endCamera() functions enable advanced customization of the camera space.
* Please see the reference for beginCamera() for a description of how the functions are used.
*
* @see beginCamera
*/
Drawing2D.prototype.endCamera = function() {
  // Camera manipulation requires a 3D rendering context.
  throw ("endCamera() is not available in 2D mode");
};
Drawing3D.prototype.endCamera = function() {
  if (!manipulatingCamera) {
    throw ("You cannot call endCamera() before calling beginCamera()");
  }
  // Copy the (possibly user-transformed) camera matrices back into the
  // model-view matrices and leave camera-manipulation mode.
  modelView.set(cam);
  modelViewInv.set(cameraInv);
  manipulatingCamera = false;
};
/**
* Sets the position of the camera through setting the eye position, the center of the scene, and which axis is facing
* upward. Moving the eye position and the direction it is pointing (the center of the scene) allows the images to be
* seen from different angles. The version without any parameters sets the camera to the default position, pointing to
* the center of the display window with the Y axis as up. The default values are camera(width/2.0, height/2.0,
* (height/2.0) / tan(PI*60.0 / 360.0), width/2.0, height/2.0, 0, 0, 1, 0). This function is similar to gluLookAt()
* in OpenGL, but it first clears the current camera settings.
*
* @param {float} eyeX x-coordinate for the eye
* @param {float} eyeY y-coordinate for the eye
* @param {float} eyeZ z-coordinate for the eye
* @param {float} centerX x-coordinate for the center of the scene
* @param {float} centerY y-coordinate for the center of the scene
* @param {float} centerZ z-coordinate for the center of the scene
* @param {float} upX usually 0.0, 1.0, -1.0
* @param {float} upY usually 0.0, 1.0, -1.0
* @param {float} upZ usually 0.0, 1.0, -1.0
*
* @see beginCamera
* @see endCamera
* @see frustum
*/
/**
 * Rebuilds the camera matrix (and its inverse) from an eye position, a
 * look-at center, and an up hint, gluLookAt-style. With no arguments it
 * restores Processing's default camera for the current sketch size.
 */
p.camera = function(eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ) {
  if (eyeX === undef) {
    // No arguments: derive the default eye/center from the sketch
    // dimensions. (Workaround if createGraphics is used.)
    cameraX = p.width / 2;
    cameraY = p.height / 2;
    // XXX(jeresig)
    cameraZ = cameraY / p.tan(cameraFOV / 2);
    eyeX = cameraX;
    eyeY = cameraY;
    eyeZ = cameraZ;
    centerX = cameraX;
    centerY = cameraY;
    centerZ = 0;
    upX = 0;
    upY = 1;
    upZ = 0;
  }
  // Build an orthonormal camera basis: forward axis from center to eye,
  // then a side axis and a re-orthogonalized up axis via cross products.
  var forward = new PVector(eyeX - centerX, eyeY - centerY, eyeZ - centerZ);
  var up = new PVector(upX, upY, upZ);
  forward.normalize();
  var side = PVector.cross(up, forward);
  up = PVector.cross(forward, side);
  side.normalize();
  up.normalize();
  // Rotation part of the view matrix is the basis; translation moves
  // the eye to the origin. cameraInv accumulates the exact inverse.
  cam.set(side.x, side.y, side.z, 0,
          up.x, up.y, up.z, 0,
          forward.x, forward.y, forward.z, 0,
          0, 0, 0, 1);
  cam.translate(-eyeX, -eyeY, -eyeZ);
  cameraInv.reset();
  cameraInv.invApply(side.x, side.y, side.z, 0,
                     up.x, up.y, up.z, 0,
                     forward.x, forward.y, forward.z, 0,
                     0, 0, 0, 1);
  cameraInv.translate(eyeX, eyeY, eyeZ);
  // The new camera becomes the current model-view state.
  modelView.set(cam);
  modelViewInv.set(cameraInv);
};
/**
* Sets a perspective projection applying foreshortening, making distant objects appear smaller than closer ones. The
* parameters define a viewing volume with the shape of truncated pyramid. Objects near to the front of the volume appear
* their actual size, while farther objects appear smaller. This projection simulates the perspective of the world more
* accurately than orthographic projection. The version of perspective without parameters sets the default perspective and
* the version with four parameters allows the programmer to set the area precisely. The default values are:
* perspective(PI/3.0, width/height, cameraZ/10.0, cameraZ*10.0) where cameraZ is ((height/2.0) / tan(PI*60.0/360.0));
*
* @param {float} fov field-of-view angle (in radians) for vertical direction
* @param {float} aspect ratio of width to height
* @param {float} zNear z-position of nearest clipping plane
* @param {float} zFar z-positions of farthest clipping plane
*/
/**
 * Sets a perspective projection. With no arguments, recomputes the
 * Processing defaults (fov = cameraFOV, near = cameraZ/10,
 * far = cameraZ*10, aspect = width/height) from the current canvas
 * size, then delegates to frustum().
 */
p.perspective = function(fov, aspect, near, far) {
  if (arguments.length === 0) {
    // in case canvas is resized
    cameraY = curElement.height / 2;
    // XXX(jeresig)
    cameraZ = cameraY / p.tan(cameraFOV / 2);
    cameraNear = cameraZ / 10;
    cameraFar = cameraZ * 10;
    cameraAspect = p.width / p.height;
    fov = cameraFOV;
    aspect = cameraAspect;
    near = cameraNear;
    far = cameraFar;
  }
  // Convert the symmetric vertical field of view into the four frustum
  // plane coordinates at the near plane.
  // XXX(jeresig)
  var top = near * p.tan(fov / 2);
  var bottom = -top;
  var right = top * aspect;
  var left = bottom * aspect;
  p.frustum(left, right, bottom, top, near, far);
};
/**
* Sets a perspective matrix defined through the parameters. Works like glFrustum, except it wipes out the current
* perspective matrix rather than muliplying itself with it.
*
* @param {float} left left coordinate of the clipping plane
* @param {float} right right coordinate of the clipping plane
* @param {float} bottom bottom coordinate of the clipping plane
* @param {float} top top coordinate of the clipping plane
* @param {float} near near coordinate of the clipping plane
* @param {float} far far coordinate of the clipping plane
*
* @see beginCamera
* @see camera
* @see endCamera
* @see perspective
*/
Drawing2D.prototype.frustum = function() {
  // Perspective projection requires a 3D rendering context.
  throw("Processing.js: frustum() is not supported in 2D mode");
};
/**
 * Replaces the projection with a glFrustum-style perspective matrix and
 * uploads it to every program object the renderer uses.
 */
Drawing3D.prototype.frustum = function(left, right, bottom, top, near, far) {
  frustumMode = true;
  // Standard perspective matrix; PMatrix3D.set takes the 16 entries in
  // row-major order, and the copy below is transposed for GL upload.
  projection = new PMatrix3D();
  projection.set(
    (2 * near) / (right - left), 0, (right + left) / (right - left), 0,
    0, (2 * near) / (top - bottom), (top + bottom) / (top - bottom), 0,
    0, 0, -(far + near) / (far - near), -(2 * far * near) / (far - near),
    0, 0, -1, 0
  );
  var transposed = new PMatrix3D();
  transposed.set(projection);
  transposed.transpose();
  // Each program object keeps its own copy of the projection uniform.
  curContext.useProgram(programObject2D);
  uniformMatrix("projection2d", programObject2D, "projection", false, transposed.array());
  curContext.useProgram(programObject3D);
  uniformMatrix("projection3d", programObject3D, "projection", false, transposed.array());
  curContext.useProgram(programObjectUnlitShape);
  uniformMatrix("uProjectionUS", programObjectUnlitShape, "uProjection", false, transposed.array());
};
/**
* Sets an orthographic projection and defines a parallel clipping volume. All objects with the same dimension appear
* the same size, regardless of whether they are near or far from the camera. The parameters to this function specify
* the clipping volume where left and right are the minimum and maximum x values, top and bottom are the minimum and
* maximum y values, and near and far are the minimum and maximum z values. If no parameters are given, the default
* is used: ortho(0, width, 0, height, -10, 10).
*
* @param {float} left left plane of the clipping volume
* @param {float} right right plane of the clipping volume
* @param {float} bottom bottom plane of the clipping volume
* @param {float} top top plane of the clipping volume
* @param {float} near maximum distance from the origin to the viewer
* @param {float} far maximum distance from the origin away from the viewer
*/
/**
 * Replaces the projection with an orthographic (parallel) matrix and
 * uploads it to every program object. With no arguments the default
 * ortho(0, width, 0, height, -10, 10) volume is used.
 */
p.ortho = function(left, right, bottom, top, near, far) {
  if (arguments.length === 0) {
    // Default: a pixel-aligned volume spanning the sketch.
    left = 0;
    right = p.width;
    bottom = 0;
    top = p.height;
    near = -10;
    far = 10;
  }
  // Scale factors and translation terms of the orthographic matrix.
  var sx = 2 / (right - left);
  var sy = 2 / (top - bottom);
  var sz = -2 / (far - near);
  var tx = -(right + left) / (right - left);
  var ty = -(top + bottom) / (top - bottom);
  var tz = -(far + near) / (far - near);
  projection = new PMatrix3D();
  projection.set(sx, 0, 0, tx,
                 0, sy, 0, ty,
                 0, 0, sz, tz,
                 0, 0, 0, 1);
  // Transposed copy for GL upload; each program object keeps its own
  // copy of the projection uniform.
  var transposed = new PMatrix3D();
  transposed.set(projection);
  transposed.transpose();
  curContext.useProgram(programObject2D);
  uniformMatrix("projection2d", programObject2D, "projection", false, transposed.array());
  curContext.useProgram(programObject3D);
  uniformMatrix("projection3d", programObject3D, "projection", false, transposed.array());
  curContext.useProgram(programObjectUnlitShape);
  uniformMatrix("uProjectionUS", programObjectUnlitShape, "uProjection", false, transposed.array());
  frustumMode = false;
};
/**
* The printProjection() prints the current projection matrix to the text window.
*/
p.printProjection = function() {
  // Delegates to PMatrix3D.print() on the current projection matrix.
  projection.print();
};
/**
* The printCamera() function prints the current camera matrix.
*/
p.printCamera = function() {
  // Delegates to PMatrix3D.print() on the current camera matrix.
  cam.print();
};
////////////////////////////////////////////////////////////////////////////
// Shapes
////////////////////////////////////////////////////////////////////////////
/**
* The box() function renders a box. A box is an extruded rectangle. A box with equal dimension on all sides is a cube.
* Calling this function with only one parameter will create a cube.
*
* @param {int|float} w dimension of the box in the x-dimension
* @param {int|float} h dimension of the box in the y-dimension
* @param {int|float} d dimension of the box in the z-dimension
*/
// box() is 3D-only; the 2D renderer routes to the shared stub.
Drawing2D.prototype.box = DrawingShared.prototype.a3DOnlyFunction;
/**
 * Renders a box of dimensions w x h x d by scaling prebuilt unit-box
 * vertex buffers. Fill and stroke are drawn as two separate passes.
 */
Drawing3D.prototype.box = function(w, h, d) {
  // user can uniformly scale the box by
  // passing in only one argument.
  // NOTE(review): the check is falsy-based, so an explicit 0 height or
  // depth also collapses the box to a cube of side w — confirm intended.
  if (!h || !d) {
    h = d = w;
  }
  // Modeling transformation: scale the unit box to the requested size.
  var model = new PMatrix3D();
  model.scale(w, h, d);
  // viewing transformation needs to have Y flipped
  // because that's what Processing does.
  var view = new PMatrix3D();
  view.scale(1, -1, 1);
  view.apply(modelView.array());
  view.transpose();
  if (doFill) {
    curContext.useProgram(programObject3D);
    uniformMatrix("model3d", programObject3D, "model", false, model.array());
    uniformMatrix("view3d", programObject3D, "view", false, view.array());
    // fix stitching problems. (lines get occluded by triangles
    // since they share the same depth values). This is not entirely
    // working, but it's a start for drawing the outline. So
    // developers can start playing around with styles.
    curContext.enable(curContext.POLYGON_OFFSET_FILL);
    curContext.polygonOffset(1, 1);
    uniformf("color3d", programObject3D, "color", fillStyle);
    // Calculating the normal matrix can be expensive, so only
    // do it if it's necessary (i.e. at least one light is active).
    if(lightCount > 0){
      // Create the normal transformation matrix: the inverse transpose
      // of view * model.
      var v = new PMatrix3D();
      v.set(view);
      var m = new PMatrix3D();
      m.set(model);
      v.mult(m);
      var normalMatrix = new PMatrix3D();
      normalMatrix.set(v);
      normalMatrix.invert();
      normalMatrix.transpose();
      uniformMatrix("normalTransform3d", programObject3D, "normalTransform", false, normalMatrix.array());
      vertexAttribPointer("normal3d", programObject3D, "Normal", 3, boxNormBuffer);
    }
    else{
      // Unlit: the shader does not need per-vertex normals.
      disableVertexAttribPointer("normal3d", programObject3D, "Normal");
    }
    vertexAttribPointer("vertex3d", programObject3D, "Vertex", 3, boxBuffer);
    // Turn off per vertex colors and texturing for the solid fill pass.
    disableVertexAttribPointer("aColor3d", programObject3D, "aColor");
    disableVertexAttribPointer("aTexture3d", programObject3D, "aTexture");
    curContext.drawArrays(curContext.TRIANGLES, 0, boxVerts.length / 3);
    curContext.disable(curContext.POLYGON_OFFSET_FILL);
  }
  if (lineWidth > 0 && doStroke) {
    // Outline pass: drawn with the 2D (unlit) program as line segments.
    curContext.useProgram(programObject2D);
    uniformMatrix("model2d", programObject2D, "model", false, model.array());
    uniformMatrix("view2d", programObject2D, "view", false, view.array());
    uniformf("color2d", programObject2D, "color", strokeStyle);
    uniformi("picktype2d", programObject2D, "picktype", 0);
    vertexAttribPointer("vertex2d", programObject2D, "Vertex", 3, boxOutlineBuffer);
    disableVertexAttribPointer("aTextureCoord2d", programObject2D, "aTextureCoord");
    curContext.drawArrays(curContext.LINES, 0, boxOutlineVerts.length / 3);
  }
};
/**
* The initSphere() function is a helper function used by sphereDetail()
* This function creates and stores sphere vertices every time the user changes sphere detail.
*
* @see #sphereDetail
*/
var initSphere = function() {
  var i;
  // sphereVerts is laid out as a triangle strip: southern cap, middle
  // rings, then northern cap. sphereX/Y/Z hold unit-sphere ring
  // coordinates — presumably filled in by sphereDetail(); verify there.
  sphereVerts = [];
  // southern cap: alternate the south pole (0,-1,0) with the first ring
  for (i = 0; i < sphereDetailU; i++) {
    sphereVerts.push(0);
    sphereVerts.push(-1);
    sphereVerts.push(0);
    sphereVerts.push(sphereX[i]);
    sphereVerts.push(sphereY[i]);
    sphereVerts.push(sphereZ[i]);
  }
  // close the cap by wrapping back to the ring's first vertex
  sphereVerts.push(0);
  sphereVerts.push(-1);
  sphereVerts.push(0);
  sphereVerts.push(sphereX[0]);
  sphereVerts.push(sphereY[0]);
  sphereVerts.push(sphereZ[0]);
  var v1, v11, v2;
  // middle rings: stitch ring v1 to the next ring v2 vertex by vertex
  var voff = 0;
  for (i = 2; i < sphereDetailV; i++) {
    v1 = v11 = voff; // v11 remembers the ring start for the wrap-around
    voff += sphereDetailU;
    v2 = voff;
    for (var j = 0; j < sphereDetailU; j++) {
      sphereVerts.push(sphereX[v1]);
      sphereVerts.push(sphereY[v1]);
      sphereVerts.push(sphereZ[v1++]);
      sphereVerts.push(sphereX[v2]);
      sphereVerts.push(sphereY[v2]);
      sphereVerts.push(sphereZ[v2++]);
    }
    // close each ring
    v1 = v11;
    v2 = voff;
    sphereVerts.push(sphereX[v1]);
    sphereVerts.push(sphereY[v1]);
    sphereVerts.push(sphereZ[v1]);
    sphereVerts.push(sphereX[v2]);
    sphereVerts.push(sphereY[v2]);
    sphereVerts.push(sphereZ[v2]);
  }
  // add the northern cap: alternate the last ring with the north pole
  for (i = 0; i < sphereDetailU; i++) {
    v2 = voff + i;
    sphereVerts.push(sphereX[v2]);
    sphereVerts.push(sphereY[v2]);
    sphereVerts.push(sphereZ[v2]);
    sphereVerts.push(0);
    sphereVerts.push(1);
    sphereVerts.push(0);
  }
  // close the cap by wrapping back to the ring's first vertex
  sphereVerts.push(sphereX[voff]);
  sphereVerts.push(sphereY[voff]);
  sphereVerts.push(sphereZ[voff]);
  sphereVerts.push(0);
  sphereVerts.push(1);
  sphereVerts.push(0);
  // set the buffer data (STATIC_DRAW: re-uploaded only on detail change)
  curContext.bindBuffer(curContext.ARRAY_BUFFER, sphereBuffer);
  curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(sphereVerts), curContext.STATIC_DRAW);
};
/**
 * The sphereDetail() function controls the detail used to render a sphere by
 * adjusting the number of vertices of the sphere mesh. The default resolution
 * is 30, which creates a fairly detailed sphere definition with vertices every
 * 360/30 = 12 degrees. If you're going to render a great number of spheres per
 * frame, it is advised to reduce the level of detail using this function.
 * The setting stays active until sphereDetail() is called again with a new
 * parameter, so it should not be called prior to every sphere() statement
 * unless you want spheres with different settings. Calling this function with
 * one parameter sets both resolutions (minimum of 3 segments per revolution).
 *
 * @param {int} ures number of segments used horizontally (longitudinally) per full circle revolution
 * @param {int} vres number of segments used vertically (latitudinally) from top to bottom
 *
 * @see #sphere()
 */
p.sphereDetail = function(ures, vres) {
  var i;
  // a single argument sets both resolutions
  if (arguments.length === 1) {
    ures = vres = arguments[0];
  }
  // enforce the minimum tessellation in each direction
  if (ures < 3) {
    ures = 3;
  }
  if (vres < 2) {
    vres = 2;
  }
  // nothing to do when the detail is unchanged
  if ((ures === sphereDetailU) && (vres === sphereDetailV)) {
    return;
  }
  // sample a unit circle in the XZ plane from the sin/cos lookup tables
  var delta = PConstants.SINCOS_LENGTH / ures;
  var unitCircleX = new Float32Array(ures);
  var unitCircleZ = new Float32Array(ures);
  for (i = 0; i < ures; i++) {
    unitCircleX[i] = cosLUT[((i * delta) % PConstants.SINCOS_LENGTH) | 0];
    unitCircleZ[i] = sinLUT[((i * delta) % PConstants.SINCOS_LENGTH) | 0];
  }
  // the vertex list starts at the south pole; poles contribute the +2
  var vertCount = ures * (vres - 1) + 2;
  var currVert = 0;
  // re-init arrays that store the ring vertices
  sphereX = new Float32Array(vertCount);
  sphereY = new Float32Array(vertCount);
  sphereZ = new Float32Array(vertCount);
  var angleStep = (PConstants.SINCOS_LENGTH * 0.5) / vres;
  var angle = angleStep;
  // step along the Y axis, scaling the unit circle for each latitude ring
  for (i = 1; i < vres; i++) {
    var lutIndex = (angle % PConstants.SINCOS_LENGTH) | 0;
    var curradius = sinLUT[lutIndex];
    var currY = -cosLUT[lutIndex];
    for (var j = 0; j < ures; j++) {
      sphereX[currVert] = unitCircleX[j] * curradius;
      sphereY[currVert] = currY;
      sphereZ[currVert++] = unitCircleZ[j] * curradius;
    }
    angle += angleStep;
  }
  sphereDetailU = ures;
  sphereDetailV = vres;
  // regenerate the sphere vertex strip and upload it
  initSphere();
};
/**
 * The sphere() function draws a sphere with radius r centered at coordinate 0, 0, 0.
 * A sphere is a hollow ball made from tessellated triangles, rendered as the
 * triangle strip prepared by sphereDetail()/initSphere().
 *
 * @param {int|float} r the radius of the sphere
 *
 * @see #sphereDetail
 */
Drawing2D.prototype.sphere = DrawingShared.prototype.a3DOnlyFunction;
Drawing3D.prototype.sphere = function() {
var sRad = arguments[0];
// lazily build the sphere mesh at the default detail if none exists yet
if ((sphereDetailU < 3) || (sphereDetailV < 2)) {
p.sphereDetail(30);
}
// Modeling transformation: scale the unit sphere to the requested radius
var model = new PMatrix3D();
model.scale(sRad, sRad, sRad);
// viewing transformation needs to have Y flipped
// because that's what Processing does.
var view = new PMatrix3D();
view.scale(1, -1, 1);
view.apply(modelView.array());
view.transpose();
if (doFill) {
// Calculating the normal matrix can be expensive, so only
// do it if it's necessary (i.e. when lights are on)
if(lightCount > 0){
// Create a normal transformation matrix: the
// inverse-transpose of modelview (view * model)
var v = new PMatrix3D();
v.set(view);
var m = new PMatrix3D();
m.set(model);
v.mult(m);
var normalMatrix = new PMatrix3D();
normalMatrix.set(v);
normalMatrix.invert();
normalMatrix.transpose();
uniformMatrix("normalTransform3d", programObject3D, "normalTransform", false, normalMatrix.array());
// on a unit sphere the positions double as normals, so reuse the buffer
vertexAttribPointer("normal3d", programObject3D, "Normal", 3, sphereBuffer);
}
else{
disableVertexAttribPointer("normal3d", programObject3D, "Normal");
}
curContext.useProgram(programObject3D);
disableVertexAttribPointer("aTexture3d", programObject3D, "aTexture");
uniformMatrix("model3d", programObject3D, "model", false, model.array());
uniformMatrix("view3d", programObject3D, "view", false, view.array());
vertexAttribPointer("vertex3d", programObject3D, "Vertex", 3, sphereBuffer);
// Turn off per vertex colors
disableVertexAttribPointer("aColor3d", programObject3D, "aColor");
// fix stitching problems. (lines get occluded by triangles
// since they share the same depth values). This is not entirely
// working, but it's a start for drawing the outline. So
// developers can start playing around with styles.
curContext.enable(curContext.POLYGON_OFFSET_FILL);
curContext.polygonOffset(1, 1);
uniformf("color3d", programObject3D, "color", fillStyle);
curContext.drawArrays(curContext.TRIANGLE_STRIP, 0, sphereVerts.length / 3);
curContext.disable(curContext.POLYGON_OFFSET_FILL);
}
// stroke pass: draw the same strip as lines with the 2D (unlit) program
if (lineWidth > 0 && doStroke) {
curContext.useProgram(programObject2D);
uniformMatrix("model2d", programObject2D, "model", false, model.array());
uniformMatrix("view2d", programObject2D, "view", false, view.array());
vertexAttribPointer("vertex2d", programObject2D, "Vertex", 3, sphereBuffer);
disableVertexAttribPointer("aTextureCoord2d", programObject2D, "aTextureCoord");
uniformf("color2d", programObject2D, "color", strokeStyle);
uniformi("picktype2d", programObject2D, "picktype", 0);
curContext.drawArrays(curContext.LINE_STRIP, 0, sphereVerts.length / 3);
}
};
////////////////////////////////////////////////////////////////////////////
// Coordinates
////////////////////////////////////////////////////////////////////////////
/**
 * Returns the X component of a point's position in model space: the given
 * coordinate is pushed through the current modelview transformations
 * (scale, rotate, translate, etc.) and then the camera transform is undone,
 * so the result can place an object relative to the original point after
 * the transformations are no longer in use.
 *
 * @param {int | float} x 3D x coordinate to be mapped
 * @param {int | float} y 3D y coordinate to be mapped
 * @param {int | float} z 3D z coordinate to be mapped
 *
 * @returns {float}
 *
 * @see modelY
 * @see modelZ
 */
p.modelX = function(x, y, z) {
  var mv = modelView.array();
  var ci = cameraInv.array();
  // transform (x, y, z, 1) by the row-major modelview matrix
  var tx = mv[0] * x + mv[1] * y + mv[2] * z + mv[3];
  var ty = mv[4] * x + mv[5] * y + mv[6] * z + mv[7];
  var tz = mv[8] * x + mv[9] * y + mv[10] * z + mv[11];
  var tw = mv[12] * x + mv[13] * y + mv[14] * z + mv[15];
  // undo the camera transform and read back the X and W rows
  var outX = ci[0] * tx + ci[1] * ty + ci[2] * tz + ci[3] * tw;
  var outW = ci[12] * tx + ci[13] * ty + ci[14] * tz + ci[15] * tw;
  // homogeneous divide, guarding against a zero w component
  if (outW !== 0) {
    return outX / outW;
  }
  return outX;
};
/**
 * Returns the Y component of a point's position in model space: the given
 * coordinate is pushed through the current modelview transformations
 * (scale, rotate, translate, etc.) and then the camera transform is undone,
 * so the result can place an object relative to the original point after
 * the transformations are no longer in use.
 *
 * @param {int | float} x 3D x coordinate to be mapped
 * @param {int | float} y 3D y coordinate to be mapped
 * @param {int | float} z 3D z coordinate to be mapped
 *
 * @returns {float}
 *
 * @see modelX
 * @see modelZ
 */
p.modelY = function(x, y, z) {
  var mv = modelView.array();
  var ci = cameraInv.array();
  // transform (x, y, z, 1) by the row-major modelview matrix
  var tx = mv[0] * x + mv[1] * y + mv[2] * z + mv[3];
  var ty = mv[4] * x + mv[5] * y + mv[6] * z + mv[7];
  var tz = mv[8] * x + mv[9] * y + mv[10] * z + mv[11];
  var tw = mv[12] * x + mv[13] * y + mv[14] * z + mv[15];
  // undo the camera transform and read back the Y and W rows
  var outY = ci[4] * tx + ci[5] * ty + ci[6] * tz + ci[7] * tw;
  var outW = ci[12] * tx + ci[13] * ty + ci[14] * tz + ci[15] * tw;
  // homogeneous divide, guarding against a zero w component
  if (outW !== 0) {
    return outY / outW;
  }
  return outY;
};
/**
 * Returns the Z component of a point's position in model space: the given
 * coordinate is pushed through the current modelview transformations
 * (scale, rotate, translate, etc.) and then the camera transform is undone,
 * so the result can place an object relative to the original point after
 * the transformations are no longer in use.
 *
 * @param {int | float} x 3D x coordinate to be mapped
 * @param {int | float} y 3D y coordinate to be mapped
 * @param {int | float} z 3D z coordinate to be mapped
 *
 * @returns {float}
 *
 * @see modelX
 * @see modelY
 */
p.modelZ = function(x, y, z) {
  var mv = modelView.array();
  var ci = cameraInv.array();
  // transform (x, y, z, 1) by the row-major modelview matrix
  var tx = mv[0] * x + mv[1] * y + mv[2] * z + mv[3];
  var ty = mv[4] * x + mv[5] * y + mv[6] * z + mv[7];
  var tz = mv[8] * x + mv[9] * y + mv[10] * z + mv[11];
  var tw = mv[12] * x + mv[13] * y + mv[14] * z + mv[15];
  // undo the camera transform and read back the Z and W rows
  var outZ = ci[8] * tx + ci[9] * ty + ci[10] * tz + ci[11] * tw;
  var outW = ci[12] * tx + ci[13] * ty + ci[14] * tz + ci[15] * tw;
  // homogeneous divide, guarding against a zero w component
  if (outW !== 0) {
    return outZ / outW;
  }
  return outZ;
};
////////////////////////////////////////////////////////////////////////////
// Material Properties
////////////////////////////////////////////////////////////////////////////
/**
 * Sets the ambient reflectance of the material used for subsequent shapes.
 * This is combined with the ambient light component of the environment: for
 * example, in the default color mode, ambient(255, 126, 0) reflects all red
 * light and half of the green light. Used together with emissive(),
 * specular(), and shininess() to set a shape's material properties.
 *
 * @param {int | float} v1 red or hue value (or a single gray value / color)
 * @param {int | float} v2 green or saturation value
 * @param {int | float} v3 blue or brightness value
 *
 * @returns none
 *
 * @see emissive
 * @see specular
 * @see shininess
 */
Drawing2D.prototype.ambient = DrawingShared.prototype.a3DOnlyFunction;
Drawing3D.prototype.ambient = function(v1, v2, v3) {
  // material uniforms live on the lit 3D shader program
  curContext.useProgram(programObject3D);
  uniformi("usingMat3d", programObject3D, "usingMat", true);
  var ambientColor = p.color(v1, v2, v3);
  // only the RGB components are sent; material colors carry no alpha
  uniformf("mat_ambient3d", programObject3D, "mat_ambient", p.color.toGLArray(ambientColor).slice(0, 3));
};
/**
 * Sets the emissive color of the material used for drawing shapes to the
 * screen. Used together with ambient(), specular(), and shininess() to set
 * a shape's material properties.
 *
 * Can be called in the following ways:
 *
 * emissive(gray)
 * @param {int | float} gray number specifying value between white and black
 *
 * emissive(color)
 * @param {color} color any value of the color datatype
 *
 * emissive(v1, v2, v3)
 * @param {int | float} v1 red or hue value
 * @param {int | float} v2 green or saturation value
 * @param {int | float} v3 blue or brightness value
 *
 * @returns none
 *
 * @see ambient
 * @see specular
 * @see shininess
 */
Drawing2D.prototype.emissive = DrawingShared.prototype.a3DOnlyFunction;
Drawing3D.prototype.emissive = function(v1, v2, v3) {
  // material uniforms live on the lit 3D shader program
  curContext.useProgram(programObject3D);
  uniformi("usingMat3d", programObject3D, "usingMat", true);
  var emissiveColor = p.color(v1, v2, v3);
  // only the RGB components are sent; material colors carry no alpha
  uniformf("mat_emissive3d", programObject3D, "mat_emissive", p.color.toGLArray(emissiveColor).slice(0, 3));
};
/**
 * Sets the amount of gloss in the surface of shapes. Used in combination with
 * ambient(), specular(), and emissive() in setting the
 * material properties of shapes.
 *
 * @param {float} shine degree of shininess
 *
 * @returns none
 *
 * @see ambient
 * @see emissive
 * @see specular
 */
Drawing2D.prototype.shininess = DrawingShared.prototype.a3DOnlyFunction;
Drawing3D.prototype.shininess = function(shine) {
// material uniforms live on the lit 3D shader program
curContext.useProgram(programObject3D);
uniformi("usingMat3d", programObject3D, "usingMat", true);
uniformf("shininess3d", programObject3D, "shininess", shine);
};
/**
 * Sets the specular color of the materials used for shapes drawn to the
 * screen, which sets the color of highlights. Specular refers to light which
 * bounces off a surface in a preferred direction (rather than bouncing in
 * all directions like a diffuse light). Used together with emissive(),
 * ambient(), and shininess() to set a shape's material properties.
 *
 * Can be called in the following ways:
 *
 * specular(gray)
 * @param {int | float} gray number specifying value between white and black
 *
 * specular(gray, alpha)
 * @param {int | float} gray number specifying value between white and black
 * @param {int | float} alpha opacity
 *
 * specular(color)
 * @param {color} color any value of the color datatype
 *
 * specular(v1, v2, v3)
 * @param {int | float} v1 red or hue value
 * @param {int | float} v2 green or saturation value
 * @param {int | float} v3 blue or brightness value
 *
 * specular(v1, v2, v3, alpha)
 * @param {int | float} v1 red or hue value
 * @param {int | float} v2 green or saturation value
 * @param {int | float} v3 blue or brightness value
 * @param {int | float} alpha opacity
 *
 * @returns none
 *
 * @see ambient
 * @see emissive
 * @see shininess
 */
Drawing2D.prototype.specular = DrawingShared.prototype.a3DOnlyFunction;
Drawing3D.prototype.specular = function(v1, v2, v3) {
  // material uniforms live on the lit 3D shader program
  curContext.useProgram(programObject3D);
  uniformi("usingMat3d", programObject3D, "usingMat", true);
  var specularColor = p.color(v1, v2, v3);
  // only the RGB components are sent; any alpha is discarded by the slice
  uniformf("mat_specular3d", programObject3D, "mat_specular", p.color.toGLArray(specularColor).slice(0, 3));
};
////////////////////////////////////////////////////////////////////////////
// Coordinates
////////////////////////////////////////////////////////////////////////////
/**
 * Takes a three-dimensional X, Y, Z position and returns the X value for
 * where it will appear on a (two-dimensional) screen.
 *
 * @param {int | float} x 3D x coordinate to be mapped
 * @param {int | float} y 3D y coordinate to be mapped
 * @param {int | float} z 3D z optional coordinate to be mapped
 *
 * @returns {float}
 *
 * @see screenY
 * @see screenZ
 */
p.screenX = function(x, y, z) {
  var mv = modelView.array();
  // a non-4x4 modelview means we are rendering in 2D
  if (mv.length !== 16) {
    return modelView.multX(x, y);
  }
  var pj = projection.array();
  // transform (x, y, z, 1) by the row-major modelview matrix
  var ax = mv[0] * x + mv[1] * y + mv[2] * z + mv[3];
  var ay = mv[4] * x + mv[5] * y + mv[6] * z + mv[7];
  var az = mv[8] * x + mv[9] * y + mv[10] * z + mv[11];
  var aw = mv[12] * x + mv[13] * y + mv[14] * z + mv[15];
  // project, then perform the perspective divide (guarding w == 0)
  var ox = pj[0] * ax + pj[1] * ay + pj[2] * az + pj[3] * aw;
  var ow = pj[12] * ax + pj[13] * ay + pj[14] * az + pj[15] * aw;
  if (ow !== 0) {
    ox /= ow;
  }
  // map normalized device X in [-1, 1] onto pixel columns
  return p.width * (1 + ox) / 2.0;
};
/**
 * Takes a three-dimensional X, Y, Z position and returns the Y value for
 * where it will appear on a (two-dimensional) screen.
 *
 * @param {int | float} x 3D x coordinate to be mapped
 * @param {int | float} y 3D y coordinate to be mapped
 * @param {int | float} z 3D z optional coordinate to be mapped
 *
 * @returns {float}
 *
 * @see screenX
 * @see screenZ
 */
p.screenY = function screenY(x, y, z) {
  var mv = modelView.array();
  // a non-4x4 modelview means we are rendering in 2D
  if (mv.length !== 16) {
    return modelView.multY(x, y);
  }
  var pj = projection.array();
  // transform (x, y, z, 1) by the row-major modelview matrix
  var ax = mv[0] * x + mv[1] * y + mv[2] * z + mv[3];
  var ay = mv[4] * x + mv[5] * y + mv[6] * z + mv[7];
  var az = mv[8] * x + mv[9] * y + mv[10] * z + mv[11];
  var aw = mv[12] * x + mv[13] * y + mv[14] * z + mv[15];
  // project, then perform the perspective divide (guarding w == 0)
  var oy = pj[4] * ax + pj[5] * ay + pj[6] * az + pj[7] * aw;
  var ow = pj[12] * ax + pj[13] * ay + pj[14] * az + pj[15] * aw;
  if (ow !== 0) {
    oy /= ow;
  }
  // map normalized device Y in [-1, 1] onto pixel rows
  return p.height * (1 + oy) / 2.0;
};
/**
 * Takes a three-dimensional X, Y, Z position and returns the Z value for
 * where it will appear on a (two-dimensional) screen.
 *
 * @param {int | float} x 3D x coordinate to be mapped
 * @param {int | float} y 3D y coordinate to be mapped
 * @param {int | float} z 3D z coordinate to be mapped
 *
 * @returns {float}
 *
 * @see screenX
 * @see screenY
 */
p.screenZ = function screenZ(x, y, z) {
  var mv = modelView.array();
  // depth is meaningless without a full 3D modelview
  if (mv.length !== 16) {
    return 0;
  }
  var pj = projection.array();
  // transform (x, y, z, 1) by the row-major modelview matrix
  var ax = mv[0] * x + mv[1] * y + mv[2] * z + mv[3];
  var ay = mv[4] * x + mv[5] * y + mv[6] * z + mv[7];
  var az = mv[8] * x + mv[9] * y + mv[10] * z + mv[11];
  var aw = mv[12] * x + mv[13] * y + mv[14] * z + mv[15];
  // project, divide by w (when nonzero), and map [-1, 1] to [0, 1]
  var oz = pj[8] * ax + pj[9] * ay + pj[10] * az + pj[11] * aw;
  var ow = pj[12] * ax + pj[13] * ay + pj[14] * az + pj[15] * aw;
  return ((ow !== 0 ? oz / ow : oz) + 1) / 2.0;
};
////////////////////////////////////////////////////////////////////////////
// Style functions
////////////////////////////////////////////////////////////////////////////
/**
 * The fill() function sets the color used to fill shapes. For example,
 * fill(204, 102, 0) fills all subsequent shapes with orange. The color is
 * interpreted in the current colorMode() (default RGB, each channel 0-255).
 *
 * When using hexadecimal notation, use "#" or "0x" before the values
 * (e.g. #CCFFAA, 0xFFCCFFAA). The # syntax uses six digits (as in HTML/CSS);
 * the "0x" syntax uses eight characters, the first two being the alpha
 * component followed by red, green, and blue.
 *
 * The value for the parameter "gray" must be less than or equal to the
 * current maximum value as specified by colorMode() (default 255).
 * To change the color of an image (or a texture), use tint().
 *
 * @param {int|float} gray number specifying value between white and black
 * @param {int|float} value1 red or hue value
 * @param {int|float} value2 green or saturation value
 * @param {int|float} value3 blue or brightness value
 * @param {int|float} alpha opacity of the fill
 * @param {Color} color any value of the color datatype
 * @param {int} hex color value in hexadecimal notation (i.e. #FFCC00 or 0xFFFFCC00)
 *
 * @see #noFill()
 * @see #stroke()
 * @see #tint()
 * @see #background()
 * @see #colorMode()
 */
DrawingShared.prototype.fill = function() {
  var newColor = p.color(arguments[0], arguments[1], arguments[2], arguments[3]);
  // no-op when filling is already enabled with this exact color
  if (doFill && newColor === currentFillColor) {
    return;
  }
  doFill = true;
  currentFillColor = newColor;
};
// 2D: defer updating the canvas fillStyle until a shape is actually filled
Drawing2D.prototype.fill = function() {
DrawingShared.prototype.fill.apply(this, arguments);
isFillDirty = true;
};
// 3D: eagerly cache the fill color as a GL-ready [r, g, b, a] array
Drawing3D.prototype.fill = function() {
DrawingShared.prototype.fill.apply(this, arguments);
fillStyle = p.color.toGLArray(currentFillColor);
};
// Performs the pending canvas fill, refreshing the cached fillStyle first
// if fill() changed the color since the last draw.
function executeContextFill() {
  if (!doFill) {
    return;
  }
  if (isFillDirty) {
    curContext.fillStyle = p.color.toString(currentFillColor);
    isFillDirty = false;
  }
  curContext.fill();
}
/**
 * The noFill() function disables filling geometry. If both noStroke() and noFill()
 * are called, no shapes will be drawn to the screen.
 *
 * @see #fill()
 *
 */
p.noFill = function() {
// subsequent shapes render outline-only
doFill = false;
};
/**
 * The stroke() function sets the color used to draw lines and borders around
 * shapes. The color is interpreted in the current colorMode() (default RGB,
 * each channel 0-255).
 *
 * When using hexadecimal notation, use "#" or "0x" before the values
 * (e.g. #CCFFAA, 0xFFCCFFAA). The # syntax uses six digits (as in HTML/CSS);
 * the "0x" syntax uses eight characters, the first two being the alpha
 * component followed by red, green, and blue.
 *
 * The value for the parameter "gray" must be less than or equal to the
 * current maximum value as specified by colorMode() (default 255).
 *
 * @param {int|float} gray number specifying value between white and black
 * @param {int|float} value1 red or hue value
 * @param {int|float} value2 green or saturation value
 * @param {int|float} value3 blue or brightness value
 * @param {int|float} alpha opacity of the stroke
 * @param {Color} color any value of the color datatype
 * @param {int} hex color value in hexadecimal notation (i.e. #FFCC00 or 0xFFFFCC00)
 *
 * @see #fill()
 * @see #noStroke()
 * @see #tint()
 * @see #background()
 * @see #colorMode()
 */
DrawingShared.prototype.stroke = function() {
  var newColor = p.color(arguments[0], arguments[1], arguments[2], arguments[3]);
  // no-op when stroking is already enabled with this exact color
  if (doStroke && newColor === currentStrokeColor) {
    return;
  }
  doStroke = true;
  currentStrokeColor = newColor;
};
// 2D: defer updating the canvas strokeStyle until a shape is actually stroked
Drawing2D.prototype.stroke = function() {
DrawingShared.prototype.stroke.apply(this, arguments);
isStrokeDirty = true;
};
// 3D: eagerly cache the stroke color as a GL-ready [r, g, b, a] array
Drawing3D.prototype.stroke = function() {
DrawingShared.prototype.stroke.apply(this, arguments);
strokeStyle = p.color.toGLArray(currentStrokeColor);
};
// Performs the pending canvas stroke, refreshing the cached strokeStyle
// first if stroke() changed the color since the last draw.
function executeContextStroke() {
  if (!doStroke) {
    return;
  }
  if (isStrokeDirty) {
    curContext.strokeStyle = p.color.toString(currentStrokeColor);
    isStrokeDirty = false;
  }
  curContext.stroke();
}
/**
 * The noStroke() function disables drawing the stroke (outline). If both noStroke() and
 * noFill() are called, no shapes will be drawn to the screen.
 *
 * @see #stroke()
 */
p.noStroke = function() {
// subsequent shapes render fill-only
doStroke = false;
};
/**
 * The strokeWeight() function sets the width of the stroke used for lines, points, and the border around shapes.
 * All widths are set in units of pixels.
 *
 * @param {int|float} w the weight (in pixels) of the stroke
 */
DrawingShared.prototype.strokeWeight = function(w) {
lineWidth = w;
};
// 2D: the canvas context tracks the line width directly
Drawing2D.prototype.strokeWeight = function(w) {
DrawingShared.prototype.strokeWeight.apply(this, arguments);
curContext.lineWidth = w;
};
Drawing3D.prototype.strokeWeight = function(w) {
DrawingShared.prototype.strokeWeight.apply(this, arguments);
// Processing groups the weight of points and lines under this one function,
// but for WebGL, we need to set a uniform for points and call a function for line.
curContext.useProgram(programObject2D);
uniformf("pointSize2d", programObject2D, "pointSize", w);
curContext.useProgram(programObjectUnlitShape);
uniformf("pointSizeUnlitShape", programObjectUnlitShape, "pointSize", w);
curContext.lineWidth(w);
};
/**
 * The strokeCap() function sets the style for rendering line endings. These ends are either squared, extended, or rounded and
 * specified with the corresponding parameters SQUARE, PROJECT, and ROUND. The default cap is ROUND.
 * This function is not available with the P2D, P3D, or OPENGL renderers
 *
 * @param {int} value Either SQUARE, PROJECT, or ROUND
 */
p.strokeCap = function(value) {
// delegate straight to the canvas lineCap property
drawing.$ensureContext().lineCap = value;
};
/**
 * The strokeJoin() function sets the style of the joints which connect line segments.
 * These joints are either mitered, beveled, or rounded and specified with the corresponding parameters MITER, BEVEL, and ROUND. The default joint is MITER.
 * This function is not available with the P2D, P3D, or OPENGL renderers
 *
 * @param {int} value Either MITER, BEVEL, or ROUND
 */
p.strokeJoin = function(value) {
// delegate straight to the canvas lineJoin property
drawing.$ensureContext().lineJoin = value;
};
/**
 * The smooth() function draws all geometry with smooth (anti-aliased) edges. This will slow down the frame rate of the application,
 * but will enhance the visual refinement.
 * Note that smooth() will also improve image quality of resized images, and noSmooth() will disable image (and font) smoothing altogether.
 *
 * @see #noSmooth()
 * @see #hint()
 * @see #size()
 */
Drawing2D.prototype.smooth = function() {
  renderSmooth = true;
  var style = curElement.style;
  style.setProperty("image-rendering", "optimizeQuality", "important");
  style.setProperty("-ms-interpolation-mode", "bicubic", "important");
  // mozImageSmoothingEnabled is defined on the context's prototype, so
  // hasOwnProperty() reports false before it has ever been assigned;
  // the `in` operator walks the prototype chain and detects support.
  if ("mozImageSmoothingEnabled" in curContext) {
    curContext.mozImageSmoothingEnabled = true;
  }
};
Drawing3D.prototype.smooth = nop;
/**
 * The noSmooth() function draws all geometry with jagged (aliased) edges.
 *
 * @see #smooth()
 */
Drawing2D.prototype.noSmooth = function() {
  renderSmooth = false;
  var style = curElement.style;
  style.setProperty("image-rendering", "optimizeSpeed", "important");
  style.setProperty("image-rendering", "-moz-crisp-edges", "important");
  style.setProperty("image-rendering", "-webkit-optimize-contrast", "important");
  style.setProperty("image-rendering", "optimize-contrast", "important");
  style.setProperty("-ms-interpolation-mode", "nearest-neighbor", "important");
  // mozImageSmoothingEnabled is defined on the context's prototype, so
  // hasOwnProperty() reports false before it has ever been assigned;
  // the `in` operator walks the prototype chain and detects support.
  if ("mozImageSmoothingEnabled" in curContext) {
    curContext.mozImageSmoothingEnabled = false;
  }
};
Drawing3D.prototype.noSmooth = nop;
////////////////////////////////////////////////////////////////////////////
// Vector drawing functions
////////////////////////////////////////////////////////////////////////////
/**
 * The point() function draws a point, a coordinate in space at the dimension
 * of one pixel. The first parameter is the horizontal value, the second the
 * vertical value. Drawing a point in 3D using the z parameter requires the
 * P3D or OPENGL renderer (see the Drawing3D variant).
 *
 * @param {int|float} x x-coordinate of the point
 * @param {int|float} y y-coordinate of the point
 *
 * @see #beginShape()
 */
Drawing2D.prototype.point = function(x, y) {
  if (!doStroke) {
    return;
  }
  // snap to whole pixels unless smoothing was requested
  if (!renderSmooth) {
    x = Math.round(x);
    y = Math.round(y);
  }
  // points are painted with the stroke color through the canvas fill API,
  // so the cached fill style must be marked stale for the next shape
  curContext.fillStyle = p.color.toString(currentStrokeColor);
  isFillDirty = true;
  if (lineWidth > 1) {
    // any point wider than one pixel is rendered as a filled circle
    curContext.beginPath();
    curContext.arc(x, y, lineWidth / 2, 0, PConstants.TWO_PI, false);
    curContext.fill();
  } else {
    // a hairline point is a single filled pixel
    curContext.fillRect(x, y, 1, 1);
  }
};
// 3D variant of point(): positions a single GL point via the model matrix
// and draws it with the 2D (unlit) shader program.
Drawing3D.prototype.point = function(x, y, z) {
  // move the point to its position
  var model = new PMatrix3D();
  model.translate(x, y, z || 0);
  model.transpose();
  // Processing's view has Y flipped relative to WebGL
  var view = new PMatrix3D();
  view.scale(1, -1, 1);
  view.apply(modelView.array());
  view.transpose();
  curContext.useProgram(programObject2D);
  uniformMatrix("model2d", programObject2D, "model", false, model.array());
  uniformMatrix("view2d", programObject2D, "view", false, view.array());
  // nothing visible to draw without a positive-width stroke
  if (!(lineWidth > 0 && doStroke)) {
    return;
  }
  // this will be replaced with the new bit shifting color code
  uniformf("color2d", programObject2D, "color", strokeStyle);
  uniformi("picktype2d", programObject2D, "picktype", 0);
  vertexAttribPointer("vertex2d", programObject2D, "Vertex", 3, pointBuffer);
  disableVertexAttribPointer("aTextureCoord2d", programObject2D, "aTextureCoord");
  curContext.drawArrays(curContext.POINTS, 0, 1);
};
/**
 * Using the beginShape() and endShape() functions allow creating more complex forms.
 * beginShape() begins recording vertices for a shape and endShape() stops recording.
 * The value of the MODE parameter tells it which types of shapes to create from the provided vertices.
 * With no mode specified, the shape can be any irregular polygon. After calling the beginShape() function,
 * a series of vertex() commands must follow. To stop drawing the shape, call endShape().
 * The vertex() function with two parameters specifies a position in 2D and the vertex()
 * function with three parameters specifies a position in 3D. Each shape will be outlined with the current
 * stroke color and filled with the fill color.
 *
 * @param {int} MODE either POINTS, LINES, TRIANGLES, TRIANGLE_FAN, TRIANGLE_STRIP, QUADS, and QUAD_STRIP.
 *
 * @see endShape
 * @see vertex
 * @see curveVertex
 * @see bezierVertex
 */
p.beginShape = function(type) {
curShape = type;
// reset the recording buffers for the new shape
curvePoints = [];
vertArray = [];
};
/**
 * All shapes are constructed by connecting a series of vertices. vertex() is
 * used to specify the vertex coordinates for points, lines, triangles, quads,
 * and polygons, and is used exclusively between beginShape() and endShape().
 * The 2D form records the position, optional texture coordinates, and the
 * fill/stroke colors in effect when the vertex was added.
 *
 * @param {int | float} x x-coordinate of the vertex
 * @param {int | float} y y-coordinate of the vertex
 * @param {int | float} u horizontal coordinate for the texture mapping
 * @param {int | float} v vertical coordinate for the texture mapping
 *
 * @see beginShape
 * @see endShape
 * @see bezierVertex
 * @see curveVertex
 * @see texture
 */
Drawing2D.prototype.vertex = function(x, y, u, v) {
  if (firstVert) { firstVert = false; }
  // layout: [x, y, z(=0), u, v, fillColor, strokeColor]
  var vert = [x, y, 0, u, v, currentFillColor, currentStrokeColor];
  // tag plain vertices so later stages can tell them apart from curve entries
  vert["isVert"] = true;
  vertArray.push(vert);
};
// 3D variant of vertex(): also records normalized texture coordinates,
// the GL-ready fill/stroke RGBA values, and the current normal.
Drawing3D.prototype.vertex = function(x, y, z, u, v) {
  if (firstVert) { firstVert = false; }
  // with an active texture, a four-argument call means (x, y, u, v)
  if (v === undef && usingTexture) {
    v = u;
    u = z;
    z = 0;
  }
  if (u !== undef && v !== undef) {
    // IMAGE mode supplies u/v in pixels; normalize by the texture size
    if (curTextureMode === PConstants.IMAGE) {
      u /= curTexture.width;
      v /= curTexture.height;
    }
    // clamp texture coordinates to [0, 1]
    if (u > 1) { u = 1; } else if (u < 0) { u = 0; }
    if (v > 1) { v = 1; } else if (v < 0) { v = 0; }
  }
  // layout: position, uv, fill rgba, stroke rgba, normal
  var vert = [x, y, z || 0, u || 0, v || 0,
              fillStyle[0], fillStyle[1], fillStyle[2], fillStyle[3],
              strokeStyle[0], strokeStyle[1], strokeStyle[2], strokeStyle[3],
              normalX, normalY, normalZ];
  // tag plain vertices so later stages can tell them apart from curve entries
  vert["isVert"] = true;
  vertArray.push(vert);
};
/**
 * @private
 * Renders a batch of 3D points accumulated via beginShape()/vertex()/endShape().
 *
 * @param {Array} vArray flat array of x,y,z vertex coordinates
 * @param {Array} cArray flat array of r,g,b,a colors, one entry per vertex
 *
 * @see beginShape
 * @see endShape
 * @see vertex
 */
var point3D = function(vArray, cArray){
  // Processing's Y axis is flipped relative to WebGL
  var viewMatrix = new PMatrix3D();
  viewMatrix.scale(1, -1, 1);
  viewMatrix.apply(modelView.array());
  viewMatrix.transpose();
  curContext.useProgram(programObjectUnlitShape);
  uniformMatrix("uViewUS", programObjectUnlitShape, "uView", false, viewMatrix.array());
  // stream the positions, then the per-vertex colors
  vertexAttribPointer("aVertexUS", programObjectUnlitShape, "aVertex", 3, pointBuffer);
  curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(vArray), curContext.STREAM_DRAW);
  vertexAttribPointer("aColorUS", programObjectUnlitShape, "aColor", 4, fillColorBuffer);
  curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(cArray), curContext.STREAM_DRAW);
  curContext.drawArrays(curContext.POINTS, 0, vArray.length / 3);
};
/**
 * @private
 * Renders 3D lines accumulated via beginShape()/vertex()/endShape(), using
 * the GL primitive that matches the requested mode.
 *
 * @param {Array} vArray flat array of x,y,z vertex coordinates
 * @param {String} mode either "LINES", "LINE_LOOP", or anything else for LINE_STRIP
 * @param {Array} cArray flat array of r,g,b,a colors, one entry per vertex
 *
 * @see beginShape
 * @see endShape
 * @see vertex
 */
var line3D = function(vArray, mode, cArray){
  // map the mode string onto a GL primitive, defaulting to LINE_STRIP
  var ctxMode = (mode === "LINES") ? curContext.LINES :
                (mode === "LINE_LOOP") ? curContext.LINE_LOOP :
                curContext.LINE_STRIP;
  // Processing's Y axis is flipped relative to WebGL
  var viewMatrix = new PMatrix3D();
  viewMatrix.scale(1, -1, 1);
  viewMatrix.apply(modelView.array());
  viewMatrix.transpose();
  curContext.useProgram(programObjectUnlitShape);
  uniformMatrix("uViewUS", programObjectUnlitShape, "uView", false, viewMatrix.array());
  // stream the positions, then the per-vertex stroke colors
  vertexAttribPointer("aVertexUS", programObjectUnlitShape, "aVertex", 3, lineBuffer);
  curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(vArray), curContext.STREAM_DRAW);
  vertexAttribPointer("aColorUS", programObjectUnlitShape, "aColor", 4, strokeColorBuffer);
  curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(cArray), curContext.STREAM_DRAW);
  curContext.drawArrays(ctxMode, 0, vArray.length / 3);
};
/**
* @private
* Render filled shapes created from calls to beginShape/vertex/endShape - based on the mode specified TRIANGLES, etc.
*
* @param {Array} vArray an array of vertex coordinate
* @param {String} mode either LINES, LINE_LOOP, or LINE_STRIP
* @param {Array} cArray an array of colours used for the vertices
* @param {Array} tArray an array of u,v coordinates for textures
*
* @see beginShape
* @see endShape
* @see vertex
*/
var fill3D = function(vArray, mode, cArray, tArray){
var ctxMode;
// Map the requested primitive name onto the GL constant; anything
// else (including null/undefined) renders as a triangle strip.
if (mode === "TRIANGLES") {
ctxMode = curContext.TRIANGLES;
} else if(mode === "TRIANGLE_FAN") {
ctxMode = curContext.TRIANGLE_FAN;
} else {
ctxMode = curContext.TRIANGLE_STRIP;
}
// Flip the y-axis to match canvas coordinates, apply the model-view
// transform, and transpose into the column order WebGL expects.
var view = new PMatrix3D();
view.scale(1, -1, 1);
view.apply(modelView.array());
view.transpose();
curContext.useProgram( programObject3D );
// Identity model matrix: vArray coordinates are already in model space.
uniformMatrix("model3d", programObject3D, "model", false, [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1] );
uniformMatrix("view3d", programObject3D, "view", false, view.array() );
// Polygon offset nudges fill fragments in depth to reduce z-fighting
// with co-planar geometry (e.g. stroke outlines).
curContext.enable( curContext.POLYGON_OFFSET_FILL );
curContext.polygonOffset( 1, 1 );
// (-1,0,0,0) appears to be a sentinel telling the shader to use the
// per-vertex colour attribute instead — shader source not visible
// here; TODO confirm.
uniformf("color3d", programObject3D, "color", [-1,0,0,0]);
vertexAttribPointer("vertex3d", programObject3D, "Vertex", 3, fillBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(vArray), curContext.STREAM_DRAW);
// if we are using a texture and a tint, then overwrite the
// contents of the color buffer with the current tint
// (curTint3d presumably mutates cArray in place — defined elsewhere)
if (usingTexture && curTint !== null){
curTint3d(cArray);
}
vertexAttribPointer("aColor3d", programObject3D, "aColor", 4, fillColorBuffer);
curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(cArray), curContext.STREAM_DRAW);
// No support for lights....yet
disableVertexAttribPointer("normal3d", programObject3D, "Normal");
// When texturing, also upload the (u, v) coordinates; tArray is only
// consulted in this branch.
if (usingTexture) {
uniformi("usingTexture3d", programObject3D, "usingTexture", usingTexture);
vertexAttribPointer("aTexture3d", programObject3D, "aTexture", 2, shapeTexVBO);
curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(tArray), curContext.STREAM_DRAW);
}
curContext.drawArrays( ctxMode, 0, vArray.length/3 );
curContext.disable( curContext.POLYGON_OFFSET_FILL );
};
/**
* this series of three operations is used a lot in Drawing2D.prototype.endShape
* and has been split off as its own function, to tighten the code and allow for
* fewer bugs.
*/
function fillStrokeClose() {
// Fill the current canvas path, then stroke it, then close it —
// the common epilogue of the 2D endShape branches.
executeContextFill();
executeContextStroke();
curContext.closePath();
}
/**
* The endShape() function is the companion to beginShape() and may only be called after beginShape().
* When endshape() is called, all of image data defined since the previous call to beginShape() is written
* into the image buffer.
*
* @param {int} MODE Use CLOSE to close the shape
*
* @see beginShape
*/
Drawing2D.prototype.endShape = function(mode) {
  // Nothing was recorded since beginShape(); nothing to draw.
  if (vertArray.length === 0) { return; }

  var closeShape = mode === PConstants.CLOSE;
  // If the shape is closed, the first element is also the last element.
  if (closeShape) {
    vertArray.push(vertArray[0]);
  }

  // NOTE: Drawing3D.endShape flattens per-vertex fill/stroke/texture
  // component arrays at this point. The 2D canvas renderer below reads
  // vertArray directly, so that duplicated work (previously dead code
  // in this function) is intentionally omitted here.
  var cachedVertArray;
  firstVert = true;
  var i, j;
  var vertArrayLength = vertArray.length;

  // curveVertex: draw a Catmull-Rom curve through the recorded points.
  if ( isCurve && (curShape === PConstants.POLYGON || curShape === undef) ) {
    if (vertArrayLength > 3) {
      var b = [],
          s = 1 - curTightness;
      curContext.beginPath();
      curContext.moveTo(vertArray[1][0], vertArray[1][1]);
      /*
       * Matrix to convert from Catmull-Rom to cubic Bezier
       * where t = curTightness
       * |0         1          0         0       |
       * |(t-1)/6   1          (1-t)/6   0       |
       * |0         (1-t)/6    1         (t-1)/6 |
       * |0         0          0         0       |
       */
      for (i = 1; (i+2) < vertArrayLength; i++) {
        cachedVertArray = vertArray[i];
        // b[0]..b[3]: Bezier control points for this Catmull-Rom span.
        b[0] = [cachedVertArray[0], cachedVertArray[1]];
        b[1] = [cachedVertArray[0] + (s * vertArray[i+1][0] - s * vertArray[i-1][0]) / 6,
               cachedVertArray[1] + (s * vertArray[i+1][1] - s * vertArray[i-1][1]) / 6];
        b[2] = [vertArray[i+1][0] + (s * vertArray[i][0] - s * vertArray[i+2][0]) / 6,
               vertArray[i+1][1] + (s * vertArray[i][1] - s * vertArray[i+2][1]) / 6];
        b[3] = [vertArray[i+1][0], vertArray[i+1][1]];
        curContext.bezierCurveTo(b[1][0], b[1][1], b[2][0], b[2][1], b[3][0], b[3][1]);
      }
      fillStrokeClose();
    }
  }
  // bezierVertex: entries tagged isVert === false carry bezier data
  // [cx1, cy1, cx2, cy2, x, y] (see Drawing2D.bezierVertex).
  else if ( isBezier && (curShape === PConstants.POLYGON || curShape === undef) ) {
    curContext.beginPath();
    for (i = 0; i < vertArrayLength; i++) {
      cachedVertArray = vertArray[i];
      if (cachedVertArray["isVert"]) { // a plain vertex: move or line
        if (cachedVertArray["moveTo"]) {
          curContext.moveTo(cachedVertArray[0], cachedVertArray[1]);
        } else {
          curContext.lineTo(cachedVertArray[0], cachedVertArray[1]);
        }
      } else { // otherwise continue drawing bezier
        curContext.bezierCurveTo(cachedVertArray[0], cachedVertArray[1], cachedVertArray[2], cachedVertArray[3], cachedVertArray[4], cachedVertArray[5]);
      }
    }
    fillStrokeClose();
  }
  // Render the vertices according to the beginShape() mode.
  // NOTE(review): in 2D, vertex() appears to store the whole fill colour
  // at index 5 and the whole stroke colour at index 6 (set by
  // Drawing2D.vertex, not visible in this chunk) — unlike the
  // per-component layout used by the 3D pipeline.
  else {
    if (curShape === PConstants.POINTS) {
      for (i = 0; i < vertArrayLength; i++) {
        cachedVertArray = vertArray[i];
        if (doStroke) {
          p.stroke(cachedVertArray[6]);
        }
        p.point(cachedVertArray[0], cachedVertArray[1]);
      }
    } else if (curShape === PConstants.LINES) {
      // Every pair of vertices is an independent segment.
      for (i = 0; (i + 1) < vertArrayLength; i+=2) {
        cachedVertArray = vertArray[i];
        if (doStroke) {
          p.stroke(vertArray[i+1][6]);
        }
        p.line(cachedVertArray[0], cachedVertArray[1], vertArray[i+1][0], vertArray[i+1][1]);
      }
    } else if (curShape === PConstants.TRIANGLES) {
      // Every three vertices form an independent triangle.
      for (i = 0; (i + 2) < vertArrayLength; i+=3) {
        cachedVertArray = vertArray[i];
        curContext.beginPath();
        curContext.moveTo(cachedVertArray[0], cachedVertArray[1]);
        curContext.lineTo(vertArray[i+1][0], vertArray[i+1][1]);
        curContext.lineTo(vertArray[i+2][0], vertArray[i+2][1]);
        curContext.lineTo(cachedVertArray[0], cachedVertArray[1]);
        if (doFill) {
          p.fill(vertArray[i+2][5]);
          executeContextFill();
        }
        if (doStroke) {
          p.stroke(vertArray[i+2][6]);
          executeContextStroke();
        }
        curContext.closePath();
      }
    } else if (curShape === PConstants.TRIANGLE_STRIP) {
      // Each vertex after the first extends the strip by one triangle.
      for (i = 0; (i+1) < vertArrayLength; i++) {
        cachedVertArray = vertArray[i];
        curContext.beginPath();
        curContext.moveTo(vertArray[i+1][0], vertArray[i+1][1]);
        curContext.lineTo(cachedVertArray[0], cachedVertArray[1]);
        if (doStroke) {
          p.stroke(vertArray[i+1][6]);
        }
        if (doFill) {
          p.fill(vertArray[i+1][5]);
        }
        if (i + 2 < vertArrayLength) {
          curContext.lineTo(vertArray[i+2][0], vertArray[i+2][1]);
          if (doStroke) {
            p.stroke(vertArray[i+2][6]);
          }
          if (doFill) {
            p.fill(vertArray[i+2][5]);
          }
        }
        fillStrokeClose();
      }
    } else if (curShape === PConstants.TRIANGLE_FAN) {
      // First vertex is the hub; each further vertex closes a triangle
      // against the hub and the previous vertex.
      if (vertArrayLength > 2) {
        curContext.beginPath();
        curContext.moveTo(vertArray[0][0], vertArray[0][1]);
        curContext.lineTo(vertArray[1][0], vertArray[1][1]);
        curContext.lineTo(vertArray[2][0], vertArray[2][1]);
        if (doFill) {
          p.fill(vertArray[2][5]);
          executeContextFill();
        }
        if (doStroke) {
          p.stroke(vertArray[2][6]);
          executeContextStroke();
        }
        curContext.closePath();
        for (i = 3; i < vertArrayLength; i++) {
          cachedVertArray = vertArray[i];
          curContext.beginPath();
          curContext.moveTo(vertArray[0][0], vertArray[0][1]);
          curContext.lineTo(vertArray[i-1][0], vertArray[i-1][1]);
          curContext.lineTo(cachedVertArray[0], cachedVertArray[1]);
          if (doFill) {
            p.fill(cachedVertArray[5]);
            executeContextFill();
          }
          if (doStroke) {
            p.stroke(cachedVertArray[6]);
            executeContextStroke();
          }
          curContext.closePath();
        }
      }
    } else if (curShape === PConstants.QUADS) {
      // Every four vertices form an independent quad.
      for (i = 0; (i + 3) < vertArrayLength; i+=4) {
        cachedVertArray = vertArray[i];
        curContext.beginPath();
        curContext.moveTo(cachedVertArray[0], cachedVertArray[1]);
        for (j = 1; j < 4; j++) {
          curContext.lineTo(vertArray[i+j][0], vertArray[i+j][1]);
        }
        curContext.lineTo(cachedVertArray[0], cachedVertArray[1]);
        if (doFill) {
          p.fill(vertArray[i+3][5]);
          executeContextFill();
        }
        if (doStroke) {
          p.stroke(vertArray[i+3][6]);
          executeContextStroke();
        }
        curContext.closePath();
      }
    } else if (curShape === PConstants.QUAD_STRIP) {
      // Vertices come in pairs; each new pair closes a quad against the
      // previous one (note the crossed draw order: i+2, i, i+1, i+3).
      if (vertArrayLength > 3) {
        for (i = 0; (i+1) < vertArrayLength; i+=2) {
          cachedVertArray = vertArray[i];
          curContext.beginPath();
          if (i+3 < vertArrayLength) {
            curContext.moveTo(vertArray[i+2][0], vertArray[i+2][1]);
            curContext.lineTo(cachedVertArray[0], cachedVertArray[1]);
            curContext.lineTo(vertArray[i+1][0], vertArray[i+1][1]);
            curContext.lineTo(vertArray[i+3][0], vertArray[i+3][1]);
            if (doFill) {
              p.fill(vertArray[i+3][5]);
            }
            if (doStroke) {
              p.stroke(vertArray[i+3][6]);
            }
          } else {
            // Trailing pair with no partner: just draw its edge.
            curContext.moveTo(cachedVertArray[0], cachedVertArray[1]);
            curContext.lineTo(vertArray[i+1][0], vertArray[i+1][1]);
          }
          fillStrokeClose();
        }
      }
    } else {
      // Default: POLYGON / no mode — a single path through the vertices,
      // skipping bezier control entries (isVert false).
      curContext.beginPath();
      curContext.moveTo(vertArray[0][0], vertArray[0][1]);
      for (i = 1; i < vertArrayLength; i++) {
        cachedVertArray = vertArray[i];
        if (cachedVertArray["isVert"]) { // if it is a vertex, move to the position
          if (cachedVertArray["moveTo"]) {
            curContext.moveTo(cachedVertArray[0], cachedVertArray[1]);
          } else {
            curContext.lineTo(cachedVertArray[0], cachedVertArray[1]);
          }
        }
      }
      fillStrokeClose();
    }
  }

  // Reset curve/bezier state for the next beginShape().
  isCurve = false;
  isBezier = false;
  curveVertArray = [];
  curveVertCount = 0;

  // If the shape is closed, the first element was added as last element.
  // We must remove it again to prevent the list of vertices from growing
  // over successive calls to endShape(CLOSE).
  if (closeShape) {
    vertArray.pop();
  }
};
Drawing3D.prototype.endShape = function(mode) {
// Mirrors Drawing2D.endShape but emits WebGL geometry through the
// point3D/line3D/fill3D helpers instead of canvas path calls.
// Per-vertex layout (see vertex()): [0..2] x,y,z; [3..4] u,v;
// [5..8] fill RGBA; [9..12] stroke RGBA; [13..15] normal.
if (vertArray.length === 0) { return; }
var closeShape = mode === PConstants.CLOSE;
var lineVertArray = [];
var fillVertArray = [];
var colorVertArray = [];
var strokeVertArray = [];
var texVertArray = [];
var cachedVertArray;
firstVert = true;
var i, j, k;
var vertArrayLength = vertArray.length;
// Flatten positions (x, y, z) into fillVertArray.
for (i = 0; i < vertArrayLength; i++) {
cachedVertArray = vertArray[i];
for (j = 0; j < 3; j++) {
fillVertArray.push(cachedVertArray[j]);
}
}
// 5,6,7,8
// R,G,B,A - fill colour
for (i = 0; i < vertArrayLength; i++) {
cachedVertArray = vertArray[i];
for (j = 5; j < 9; j++) {
colorVertArray.push(cachedVertArray[j]);
}
}
// 9,10,11,12
// R, G, B, A - stroke colour
for (i = 0; i < vertArrayLength; i++) {
cachedVertArray = vertArray[i];
for (j = 9; j < 13; j++) {
strokeVertArray.push(cachedVertArray[j]);
}
}
// texture u,v
for (i = 0; i < vertArrayLength; i++) {
cachedVertArray = vertArray[i];
texVertArray.push(cachedVertArray[3]);
texVertArray.push(cachedVertArray[4]);
}
// if shape is closed, push the first point into the last point (including colours)
if (closeShape) {
fillVertArray.push(vertArray[0][0]);
fillVertArray.push(vertArray[0][1]);
fillVertArray.push(vertArray[0][2]);
for (i = 5; i < 9; i++) {
colorVertArray.push(vertArray[0][i]);
}
for (i = 9; i < 13; i++) {
strokeVertArray.push(vertArray[0][i]);
}
texVertArray.push(vertArray[0][3]);
texVertArray.push(vertArray[0][4]);
}
// End duplication
// curveVertex: the Catmull-Rom tessellation already happened inside
// curveVertexSegment (via p.vertex), so the recorded points are drawn
// directly as a strip / fan.
if ( isCurve && (curShape === PConstants.POLYGON || curShape === undef) ) {
lineVertArray = fillVertArray;
if (doStroke) {
line3D(lineVertArray, null, strokeVertArray);
}
if (doFill) {
fill3D(fillVertArray, null, colorVertArray);
}
}
// bezierVertex: drop the trailing anchor duplicate before stroking.
else if ( isBezier && (curShape === PConstants.POLYGON || curShape === undef) ) {
lineVertArray = fillVertArray;
lineVertArray.splice(lineVertArray.length - 3);
strokeVertArray.splice(strokeVertArray.length - 4);
if (doStroke) {
line3D(lineVertArray, null, strokeVertArray);
}
if (doFill) {
fill3D(fillVertArray, "TRIANGLES", colorVertArray);
}
}
// render the vertices provided
else {
if (curShape === PConstants.POINTS) { // if POINTS was the specified parameter in beginShape
for (i = 0; i < vertArrayLength; i++) { // loop through and push the point location information to the array
cachedVertArray = vertArray[i];
for (j = 0; j < 3; j++) {
lineVertArray.push(cachedVertArray[j]);
}
}
point3D(lineVertArray, strokeVertArray); // render function for points
} else if (curShape === PConstants.LINES) { // if LINES was the specified parameter in beginShape
for (i = 0; i < vertArrayLength; i++) { // loop through and push the point location information to the array
cachedVertArray = vertArray[i];
for (j = 0; j < 3; j++) {
lineVertArray.push(cachedVertArray[j]);
}
}
// NOTE(review): this loop appends fill colours to colorVertArray,
// but only strokeVertArray is passed to line3D below — the extra
// colours appear to be unused here.
for (i = 0; i < vertArrayLength; i++) { // loop through and push the color information to the array
cachedVertArray = vertArray[i];
for (j = 5; j < 9; j++) {
colorVertArray.push(cachedVertArray[j]);
}
}
line3D(lineVertArray, "LINES", strokeVertArray); // render function for lines
} else if (curShape === PConstants.TRIANGLES) { // if TRIANGLES was the specified parameter in beginShape
if (vertArrayLength > 2) {
for (i = 0; (i+2) < vertArrayLength; i+=3) { // loop through the array per triangle
// Rebuild the flat arrays per triangle so each one can be
// stroked as a closed loop and filled independently.
fillVertArray = [];
texVertArray = [];
lineVertArray = [];
colorVertArray = [];
strokeVertArray = [];
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) { // loop through and push
lineVertArray.push(vertArray[i+j][k]); // the line point location information
fillVertArray.push(vertArray[i+j][k]); // and fill point location information
}
}
for (j = 0; j < 3; j++) { // loop through and push the texture information
for (k = 3; k < 5; k++) {
texVertArray.push(vertArray[i+j][k]);
}
}
for (j = 0; j < 3; j++) {
for (k = 5; k < 9; k++) { // loop through and push
colorVertArray.push(vertArray[i+j][k]); // the colour information
strokeVertArray.push(vertArray[i+j][k+4]);// and the stroke information
}
}
if (doStroke) {
line3D(lineVertArray, "LINE_LOOP", strokeVertArray ); // line render function
}
if (doFill || usingTexture) {
fill3D(fillVertArray, "TRIANGLES", colorVertArray, texVertArray); // fill shape render function
}
}
}
} else if (curShape === PConstants.TRIANGLE_STRIP) { // if TRIANGLE_STRIP was the specified parameter in beginShape
if (vertArrayLength > 2) {
// Each window of three consecutive vertices is drawn as its own
// small strip/loop.
for (i = 0; (i+2) < vertArrayLength; i++) {
lineVertArray = [];
fillVertArray = [];
strokeVertArray = [];
colorVertArray = [];
texVertArray = [];
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) {
lineVertArray.push(vertArray[i+j][k]);
fillVertArray.push(vertArray[i+j][k]);
}
}
for (j = 0; j < 3; j++) {
for (k = 3; k < 5; k++) {
texVertArray.push(vertArray[i+j][k]);
}
}
for (j = 0; j < 3; j++) {
for (k = 5; k < 9; k++) {
strokeVertArray.push(vertArray[i+j][k+4]);
colorVertArray.push(vertArray[i+j][k]);
}
}
if (doFill || usingTexture) {
fill3D(fillVertArray, "TRIANGLE_STRIP", colorVertArray, texVertArray);
}
if (doStroke) {
line3D(lineVertArray, "LINE_LOOP", strokeVertArray);
}
}
}
} else if (curShape === PConstants.TRIANGLE_FAN) {
if (vertArrayLength > 2) {
// Stroke the first triangle of the fan as a closed loop...
for (i = 0; i < 3; i++) {
cachedVertArray = vertArray[i];
for (j = 0; j < 3; j++) {
lineVertArray.push(cachedVertArray[j]);
}
}
for (i = 0; i < 3; i++) {
cachedVertArray = vertArray[i];
for (j = 9; j < 13; j++) {
strokeVertArray.push(cachedVertArray[j]);
}
}
if (doStroke) {
line3D(lineVertArray, "LINE_LOOP", strokeVertArray);
}
// ...then stroke each further fan triangle (hub, previous, current)
// as a line strip.
for (i = 2; (i+1) < vertArrayLength; i++) {
lineVertArray = [];
strokeVertArray = [];
lineVertArray.push(vertArray[0][0]);
lineVertArray.push(vertArray[0][1]);
lineVertArray.push(vertArray[0][2]);
strokeVertArray.push(vertArray[0][9]);
strokeVertArray.push(vertArray[0][10]);
strokeVertArray.push(vertArray[0][11]);
strokeVertArray.push(vertArray[0][12]);
for (j = 0; j < 2; j++) {
for (k = 0; k < 3; k++) {
lineVertArray.push(vertArray[i+j][k]);
}
}
for (j = 0; j < 2; j++) {
for (k = 9; k < 13; k++) {
strokeVertArray.push(vertArray[i+j][k]);
}
}
if (doStroke) {
line3D(lineVertArray, "LINE_STRIP",strokeVertArray);
}
}
// The fill uses the full flattened arrays built at the top.
if (doFill || usingTexture) {
fill3D(fillVertArray, "TRIANGLE_FAN", colorVertArray, texVertArray);
}
}
} else if (curShape === PConstants.QUADS) {
for (i = 0; (i + 3) < vertArrayLength; i+=4) {
// Stroke each quad outline as a closed loop.
lineVertArray = [];
for (j = 0; j < 4; j++) {
cachedVertArray = vertArray[i+j];
for (k = 0; k < 3; k++) {
lineVertArray.push(cachedVertArray[k]);
}
}
if (doStroke) {
line3D(lineVertArray, "LINE_LOOP",strokeVertArray);
}
if (doFill) {
// Fill the quad as a two-triangle strip; note the vertex order
// 0, 1, 3, 2 required by TRIANGLE_STRIP winding.
fillVertArray = [];
colorVertArray = [];
texVertArray = [];
for (j = 0; j < 3; j++) {
fillVertArray.push(vertArray[i][j]);
}
for (j = 5; j < 9; j++) {
colorVertArray.push(vertArray[i][j]);
}
for (j = 0; j < 3; j++) {
fillVertArray.push(vertArray[i+1][j]);
}
for (j = 5; j < 9; j++) {
colorVertArray.push(vertArray[i+1][j]);
}
for (j = 0; j < 3; j++) {
fillVertArray.push(vertArray[i+3][j]);
}
for (j = 5; j < 9; j++) {
colorVertArray.push(vertArray[i+3][j]);
}
for (j = 0; j < 3; j++) {
fillVertArray.push(vertArray[i+2][j]);
}
for (j = 5; j < 9; j++) {
colorVertArray.push(vertArray[i+2][j]);
}
if (usingTexture) {
texVertArray.push(vertArray[i+0][3]);
texVertArray.push(vertArray[i+0][4]);
texVertArray.push(vertArray[i+1][3]);
texVertArray.push(vertArray[i+1][4]);
texVertArray.push(vertArray[i+3][3]);
texVertArray.push(vertArray[i+3][4]);
texVertArray.push(vertArray[i+2][3]);
texVertArray.push(vertArray[i+2][4]);
}
fill3D(fillVertArray, "TRIANGLE_STRIP", colorVertArray, texVertArray);
}
}
} else if (curShape === PConstants.QUAD_STRIP) {
var tempArray = [];
if (vertArrayLength > 3) {
// Stroke the leading edge (first vertex pair).
for (i = 0; i < 2; i++) {
cachedVertArray = vertArray[i];
for (j = 0; j < 3; j++) {
lineVertArray.push(cachedVertArray[j]);
}
}
for (i = 0; i < 2; i++) {
cachedVertArray = vertArray[i];
for (j = 9; j < 13; j++) {
strokeVertArray.push(cachedVertArray[j]);
}
}
line3D(lineVertArray, "LINE_STRIP", strokeVertArray);
// An odd trailing vertex cannot complete a quad; drop it (and its
// flattened position) before filling.
if (vertArrayLength > 4 && vertArrayLength % 2 > 0) {
tempArray = fillVertArray.splice(fillVertArray.length - 3);
vertArray.pop();
}
for (i = 0; (i+3) < vertArrayLength; i+=2) {
lineVertArray = [];
strokeVertArray = [];
for (j = 0; j < 3; j++) {
lineVertArray.push(vertArray[i+1][j]);
}
for (j = 0; j < 3; j++) {
lineVertArray.push(vertArray[i+3][j]);
}
for (j = 0; j < 3; j++) {
lineVertArray.push(vertArray[i+2][j]);
}
for (j = 0; j < 3; j++) {
lineVertArray.push(vertArray[i+0][j]);
}
for (j = 9; j < 13; j++) {
strokeVertArray.push(vertArray[i+1][j]);
}
for (j = 9; j < 13; j++) {
strokeVertArray.push(vertArray[i+3][j]);
}
for (j = 9; j < 13; j++) {
strokeVertArray.push(vertArray[i+2][j]);
}
for (j = 9; j < 13; j++) {
strokeVertArray.push(vertArray[i+0][j]);
}
if (doStroke) {
line3D(lineVertArray, "LINE_STRIP", strokeVertArray);
}
}
if (doFill || usingTexture) {
// NOTE(review): "TRIANGLE_LIST" is not a mode fill3D recognises,
// so this falls through to its TRIANGLE_STRIP default — which is
// the correct primitive for a quad strip, but the literal is
// misleading.
fill3D(fillVertArray, "TRIANGLE_LIST", colorVertArray, texVertArray);
}
}
}
// If the user didn't specify a type (LINES, TRIANGLES, etc)
else {
// If only one vertex was specified, it must be a point
if (vertArrayLength === 1) {
for (j = 0; j < 3; j++) {
lineVertArray.push(vertArray[0][j]);
}
for (j = 9; j < 13; j++) {
strokeVertArray.push(vertArray[0][j]);
}
point3D(lineVertArray,strokeVertArray);
} else {
for (i = 0; i < vertArrayLength; i++) {
cachedVertArray = vertArray[i];
for (j = 0; j < 3; j++) {
lineVertArray.push(cachedVertArray[j]);
}
// NOTE(review): indices 5..8 are the FILL components per the
// vertex layout, yet they are appended to strokeVertArray here;
// the stroke loops elsewhere use 9..12 — possibly intentional
// (stroke falls back to fill colour?), but worth confirming.
for (j = 5; j < 9; j++) {
strokeVertArray.push(cachedVertArray[j]);
}
}
if (doStroke && closeShape) {
line3D(lineVertArray, "LINE_LOOP", strokeVertArray);
} else if (doStroke && !closeShape) {
line3D(lineVertArray, "LINE_STRIP", strokeVertArray);
}
// fill is ignored if textures are used
if (doFill || usingTexture) {
fill3D(fillVertArray, "TRIANGLE_FAN", colorVertArray, texVertArray);
}
}
}
// everytime beginShape is followed by a call to
// texture(), texturing it turned back on. We do this to
// figure out if the shape should be textured or filled
// with a color.
usingTexture = false;
curContext.useProgram(programObject3D);
uniformi("usingTexture3d", programObject3D, "usingTexture", usingTexture);
}
// Reset some settings
isCurve = false;
isBezier = false;
curveVertArray = [];
curveVertCount = 0;
};
/**
* The function splineForward() setup forward-differencing matrix to be used for speedy
* curve rendering. It's based on using a specific number
* of curve segments and just doing incremental adds for each
* vertex of the segment, rather than running the mathematically
* expensive cubic equation. This function is used by both curveDetail and bezierDetail.
*
* @param {int} segments number of curve segments to use when drawing
* @param {PMatrix3D} matrix target object for the new matrix
*/
var splineForward = function(segments, matrix) {
  // Step size for one segment and its square / cube: the forward
  // difference rows are built from t, t^2 and t^3.
  var t = 1.0 / segments;
  var t2 = t * t;
  var t3 = t2 * t;
  matrix.set(0,      0,      0, 1,
             t3,     t2,     t, 0,
             6 * t3, 2 * t2, 0, 0,
             6 * t3, 0,      0, 0);
};
/**
* The curveInit() function set the number of segments to use when drawing a Catmull-Rom
* curve, and setting the s parameter, which defines how tightly
* the curve fits to each vertex. Catmull-Rom curves are actually
* a subset of this curve type where the s is set to zero.
* This in an internal function used by curveDetail() and curveTightness().
*/
var curveInit = function() {
// (Re)build the Catmull-Rom basis and forward-differencing matrices
// from the current curve tightness and detail settings.
// allocate only if/when used to save startup time
if (!curveDrawMatrix) {
curveBasisMatrix = new PMatrix3D();
curveDrawMatrix = new PMatrix3D();
curveInited = true;
}
var s = curTightness;
// Catmull-Rom basis matrix parameterised by tightness s
// (s === 0 gives the standard Catmull-Rom spline).
curveBasisMatrix.set((s - 1) / 2, (s + 3) / 2, (-3 - s) / 2, (1 - s) / 2,
(1 - s), (-5 - s) / 2, (s + 2), (s - 1) / 2,
(s - 1) / 2, 0, (1 - s) / 2, 0, 0, 1, 0, 0);
// Forward-differencing matrix sized by the current curve detail.
splineForward(curveDet, curveDrawMatrix);
if (!bezierBasisInverse) {
//bezierBasisInverse = bezierBasisMatrix.get();
//bezierBasisInverse.invert();
// NOTE(review): bezierBasisInverse is presumably initialised
// elsewhere in the file (the assignment above is commented out);
// only curveToBezierMatrix is allocated here — verify.
curveToBezierMatrix = new PMatrix3D();
}
// TODO only needed for PGraphicsJava2D? if so, move it there
// actually, it's generally useful for other renderers, so keep it
// or hide the implementation elsewhere.
curveToBezierMatrix.set(curveBasisMatrix);
curveToBezierMatrix.preApply(bezierBasisInverse);
// multiply the basis and forward diff matrices together
// saves much time since this needn't be done for each curve
curveDrawMatrix.apply(curveBasisMatrix);
};
/**
* Specifies vertex coordinates for Bezier curves. Each call to bezierVertex() defines the position of two control
* points and one anchor point of a Bezier curve, adding a new segment to a line or shape. The first time
* bezierVertex() is used within a beginShape() call, it must be prefaced with a call to vertex()
* to set the first anchor point. This function must be used between beginShape() and endShape() and only
* when there is no MODE parameter specified to beginShape(). Using the 3D version requires rendering with P3D
* or OPENGL (see the Environment reference for more information).
NOTE: Fill does not work properly yet.
*
* @param {float | int} cx1 The x-coordinate of 1st control point
* @param {float | int} cy1 The y-coordinate of 1st control point
* @param {float | int} cz1 The z-coordinate of 1st control point
* @param {float | int} cx2 The x-coordinate of 2nd control point
* @param {float | int} cy2 The y-coordinate of 2nd control point
* @param {float | int} cz2 The z-coordinate of 2nd control point
* @param {float | int} x The x-coordinate of the anchor point
* @param {float | int} y The y-coordinate of the anchor point
* @param {float | int} z The z-coordinate of the anchor point
*
* @see curveVertex
* @see vertex
* @see bezier
*/
Drawing2D.prototype.bezierVertex = function() {
  isBezier = true;
  // A bezier segment needs an existing anchor point from vertex().
  if (firstVert) {
    throw ("vertex() must be used at least once before calling bezierVertex()");
  }
  // Record the control/anchor coordinates and tag the entry so that
  // endShape() treats it as a bezier segment rather than a vertex.
  var vert = Array.prototype.slice.call(arguments);
  vert["isVert"] = false;
  vertArray.push(vert);
};
Drawing3D.prototype.bezierVertex = function() {
// 3D bezierVertex: instead of recording control points for endShape(),
// the curve is tessellated immediately into bezDetail straight
// segments that are emitted through p.vertex().
isBezier = true;
// NOTE(review): vert is never used in this 3D overload.
var vert = [];
if (firstVert) {
throw ("vertex() must be used at least once before calling bezierVertex()");
}
// Only the 9-argument (x/y/z for two control points + anchor) form
// is handled here.
if (arguments.length === 9) {
if (bezierDrawMatrix === undef) {
bezierDrawMatrix = new PMatrix3D();
}
// setup matrix for forward differencing to speed up drawing
var lastPoint = vertArray.length - 1;
splineForward( bezDetail, bezierDrawMatrix );
bezierDrawMatrix.apply( bezierBasisMatrix );
var draw = bezierDrawMatrix.array();
// The previously recorded vertex is the curve's first anchor.
var x1 = vertArray[lastPoint][0],
y1 = vertArray[lastPoint][1],
z1 = vertArray[lastPoint][2];
// First, second and third forward differences for each axis.
var xplot1 = draw[4] * x1 + draw[5] * arguments[0] + draw[6] * arguments[3] + draw[7] * arguments[6];
var xplot2 = draw[8] * x1 + draw[9] * arguments[0] + draw[10]* arguments[3] + draw[11]* arguments[6];
var xplot3 = draw[12]* x1 + draw[13]* arguments[0] + draw[14]* arguments[3] + draw[15]* arguments[6];
var yplot1 = draw[4] * y1 + draw[5] * arguments[1] + draw[6] * arguments[4] + draw[7] * arguments[7];
var yplot2 = draw[8] * y1 + draw[9] * arguments[1] + draw[10]* arguments[4] + draw[11]* arguments[7];
var yplot3 = draw[12]* y1 + draw[13]* arguments[1] + draw[14]* arguments[4] + draw[15]* arguments[7];
var zplot1 = draw[4] * z1 + draw[5] * arguments[2] + draw[6] * arguments[5] + draw[7] * arguments[8];
var zplot2 = draw[8] * z1 + draw[9] * arguments[2] + draw[10]* arguments[5] + draw[11]* arguments[8];
var zplot3 = draw[12]* z1 + draw[13]* arguments[2] + draw[14]* arguments[5] + draw[15]* arguments[8];
// Step along the curve, cascading the difference terms, emitting a
// vertex per step; the final anchor is emitted exactly afterwards.
for (var j = 0; j < bezDetail; j++) {
x1 += xplot1; xplot1 += xplot2; xplot2 += xplot3;
y1 += yplot1; yplot1 += yplot2; yplot2 += yplot3;
z1 += zplot1; zplot1 += zplot2; zplot2 += zplot3;
p.vertex(x1, y1, z1);
}
p.vertex(arguments[6], arguments[7], arguments[8]);
}
};
/**
* Sets a texture to be applied to vertex points. The texture() function
* must be called between beginShape() and endShape() and before
* any calls to vertex().
*
* When textures are in use, the fill color is ignored. Instead, use tint() to
* specify the color of the texture as it is applied to the shape.
*
* @param {PImage} pimage the texture to apply
*
* @returns none
*
* @see textureMode
* @see beginShape
* @see endShape
* @see vertex
*/
p.texture = function(pimage) {
  var curContext = drawing.$ensureContext();

  if (pimage.__texture) {
    // A GL texture was already created for this image; just rebind it.
    curContext.bindTexture(curContext.TEXTURE_2D, pimage.__texture);
  } else if (pimage.localName === "canvas") {
    // Raw <canvas> element: upload it into the shared canvas texture.
    curContext.bindTexture(curContext.TEXTURE_2D, canTex);
    curContext.texImage2D(curContext.TEXTURE_2D, 0, curContext.RGBA, curContext.RGBA, curContext.UNSIGNED_BYTE, pimage);
    curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_MAG_FILTER, curContext.LINEAR);
    curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_MIN_FILTER, curContext.LINEAR);
    curContext.generateMipmap(curContext.TEXTURE_2D);
    curTexture.width = pimage.width;
    curTexture.height = pimage.height;
  } else {
    var texture = curContext.createTexture(),
        cvs = document.createElement('canvas'),
        cvsTextureCtx = cvs.getContext('2d'),
        pot;
    // WebGL requires power-of-two texture dimensions.
    // BUG FIX: the power-of-two test must be (w & (w-1)) === 0. The old
    // code wrote `w & (w-1) === 0`, which parses as `w & ((w-1) === 0)`
    // because `===` binds tighter than `&`, reducing the test to
    // `w & (w === 1)`.
    if ((pimage.width & (pimage.width - 1)) === 0) {
      cvs.width = pimage.width;
    } else {
      // Round the width up to the next power of two.
      pot = 1;
      while (pot < pimage.width) {
        pot *= 2;
      }
      cvs.width = pot;
    }
    if ((pimage.height & (pimage.height - 1)) === 0) {
      cvs.height = pimage.height;
    } else {
      // Round the height up to the next power of two.
      pot = 1;
      while (pot < pimage.height) {
        pot *= 2;
      }
      cvs.height = pot;
    }
    // Resample the source image onto the POT-sized scratch canvas, then
    // upload it as a mipmapped, edge-clamped texture.
    cvsTextureCtx.drawImage(pimage.sourceImg, 0, 0, pimage.width, pimage.height, 0, 0, cvs.width, cvs.height);
    curContext.bindTexture(curContext.TEXTURE_2D, texture);
    curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_MIN_FILTER, curContext.LINEAR_MIPMAP_LINEAR);
    curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_MAG_FILTER, curContext.LINEAR);
    curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_WRAP_T, curContext.CLAMP_TO_EDGE);
    curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_WRAP_S, curContext.CLAMP_TO_EDGE);
    curContext.texImage2D(curContext.TEXTURE_2D, 0, curContext.RGBA, curContext.RGBA, curContext.UNSIGNED_BYTE, cvs);
    curContext.generateMipmap(curContext.TEXTURE_2D);
    // Cache the texture so later texture() calls for this image rebind.
    pimage.__texture = texture;
    curTexture.width = pimage.width;
    curTexture.height = pimage.height;
  }

  // Until endShape() resets it, fills use this texture instead of colour.
  usingTexture = true;
  curContext.useProgram(programObject3D);
  uniformi("usingTexture3d", programObject3D, "usingTexture", usingTexture);
};
/**
* Sets the coordinate space for texture mapping. There are two options, IMAGE,
* which refers to the actual coordinates of the image, and NORMALIZED, which
* refers to a normalized space of values ranging from 0 to 1. The default mode
* is IMAGE. In IMAGE, if an image is 100 x 200 pixels, mapping the image onto
* the entire size of a quad would require the points (0,0) (100,0) (100,200) (0,200).
* The same mapping in NORMALIZED space is (0,0) (1,0) (1,1) (0,1).
*
* @param MODE either IMAGE or NORMALIZED
*
* @returns none
*
* @see texture
*/
p.textureMode = function(mode){
// Record the texture coordinate space (IMAGE or NORMALIZED, per the
// doc comment above); consumed wherever u/v coordinates are
// interpreted — that code is not visible in this chunk.
curTextureMode = mode;
};
/**
* The curveVertexSegment() function handle emitting a specific segment of Catmull-Rom curve. Internal helper function used by curveVertex().
*/
var curveVertexSegment = function(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4) {
// Emit one Catmull-Rom span (between points 2 and 3; points 1 and 4
// act as guides) as curveDet straight segments, using forward
// differencing with the precomputed curveDrawMatrix (see curveInit).
var x0 = x2;
var y0 = y2;
var z0 = z2;
var draw = curveDrawMatrix.array();
// First, second and third forward differences for each axis.
var xplot1 = draw[4] * x1 + draw[5] * x2 + draw[6] * x3 + draw[7] * x4;
var xplot2 = draw[8] * x1 + draw[9] * x2 + draw[10] * x3 + draw[11] * x4;
var xplot3 = draw[12] * x1 + draw[13] * x2 + draw[14] * x3 + draw[15] * x4;
var yplot1 = draw[4] * y1 + draw[5] * y2 + draw[6] * y3 + draw[7] * y4;
var yplot2 = draw[8] * y1 + draw[9] * y2 + draw[10] * y3 + draw[11] * y4;
var yplot3 = draw[12] * y1 + draw[13] * y2 + draw[14] * y3 + draw[15] * y4;
var zplot1 = draw[4] * z1 + draw[5] * z2 + draw[6] * z3 + draw[7] * z4;
var zplot2 = draw[8] * z1 + draw[9] * z2 + draw[10] * z3 + draw[11] * z4;
var zplot3 = draw[12] * z1 + draw[13] * z2 + draw[14] * z3 + draw[15] * z4;
// Emit the span's start point, then step along the curve, cascading
// the difference terms at each step.
p.vertex(x0, y0, z0);
for (var j = 0; j < curveDet; j++) {
x0 += xplot1; xplot1 += xplot2; xplot2 += xplot3;
y0 += yplot1; yplot1 += yplot2; yplot2 += yplot3;
z0 += zplot1; zplot1 += zplot2; zplot2 += zplot3;
p.vertex(x0, y0, z0);
}
};
/**
* Specifies vertex coordinates for curves. This function may only be used between beginShape() and
* endShape() and only when there is no MODE parameter specified to beginShape(). The first and last points
* in a series of curveVertex() lines will be used to guide the beginning and end of the curve. A minimum of four
* points is required to draw a tiny curve between the second and third points. Adding a fifth point with
* curveVertex() will draw the curve between the second, third, and fourth points. The curveVertex() function
* is an implementation of Catmull-Rom splines. Using the 3D version requires rendering with P3D or OPENGL (see the
* Environment reference for more information).
NOTE: Fill does not work properly yet.
*
* @param {float | int} x The x-coordinate of the vertex
* @param {float | int} y The y-coordinate of the vertex
* @param {float | int} z The z-coordinate of the vertex
*
* @see curve
* @see beginShape
* @see endShape
* @see vertex
* @see bezierVertex
*/
Drawing2D.prototype.curveVertex = function(x, y) {
// Flag the shape as a curve so endShape() takes the Catmull-Rom
// rendering path, then record the point as a regular vertex.
isCurve = true;
p.vertex(x, y);
};
Drawing3D.prototype.curveVertex = function(x, y, z) {
  isCurve = true;
  // Lazily build the Catmull-Rom basis / forward-difference matrices.
  if (!curveInited) {
    curveInit();
  }
  // Record the control point.
  curveVertArray.push([x, y, z]);
  curveVertCount++;
  // Once four points are available, every new point completes a span:
  // the two middle points are interpolated, the outer two act as guides.
  if (curveVertCount > 3) {
    var g0 = curveVertArray[curveVertCount-4],
        g1 = curveVertArray[curveVertCount-3],
        g2 = curveVertArray[curveVertCount-2],
        g3 = curveVertArray[curveVertCount-1];
    curveVertexSegment( g0[0], g0[1], g0[2],
                        g1[0], g1[1], g1[2],
                        g2[0], g2[1], g2[2],
                        g3[0], g3[1], g3[2] );
  }
};
/**
* The curve() function draws a curved line on the screen. The first and second parameters
* specify the beginning control point and the last two parameters specify
* the ending control point. The middle parameters specify the start and
* stop of the curve. Longer curves can be created by putting a series of
* curve() functions together or using curveVertex().
* An additional function called curveTightness() provides control
* for the visual quality of the curve. The curve() function is an
* implementation of Catmull-Rom splines. Using the 3D version requires
* rendering with P3D or OPENGL (see the Environment reference for more
* information).
*
* @param {int|float} x1 coordinates for the beginning control point
* @param {int|float} y1 coordinates for the beginning control point
* @param {int|float} z1 coordinates for the beginning control point
* @param {int|float} x2 coordinates for the first point
* @param {int|float} y2 coordinates for the first point
* @param {int|float} z2 coordinates for the first point
* @param {int|float} x3 coordinates for the second point
* @param {int|float} y3 coordinates for the second point
* @param {int|float} z3 coordinates for the second point
* @param {int|float} x4 coordinates for the ending control point
* @param {int|float} y4 coordinates for the ending control point
* @param {int|float} z4 coordinates for the ending control point
*
* @see #curveVertex()
* @see #curveTightness()
* @see #bezier()
*/
Drawing2D.prototype.curve = function() {
  // curve(x1, y1, x2, y2, x3, y3, x4, y4) — anything else is a no-op.
  if (arguments.length !== 8) {
    return;
  }
  p.beginShape();
  for (var i = 0; i < 8; i += 2) {
    p.curveVertex(arguments[i], arguments[i + 1]);
  }
  p.endShape();
};
Drawing3D.prototype.curve = function() {
  // curve(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4) — anything else is a no-op.
  if (arguments.length !== 12) {
    return;
  }
  p.beginShape();
  for (var i = 0; i < 12; i += 3) {
    p.curveVertex(arguments[i], arguments[i + 1], arguments[i + 2]);
  }
  p.endShape();
};
/**
* The curveTightness() function modifies the quality of forms created with curve() and
* curveVertex(). The parameter squishy determines how the
* curve fits to the vertex points. The value 0.0 is the default value for
* squishy (this value defines the curves to be Catmull-Rom splines)
* and the value 1.0 connects all the points with straight lines.
* Values within the range -5.0 and 5.0 will deform the curves but
* will leave them recognizable and as values increase in magnitude,
* they will continue to deform.
*
* @param {float} tightness amount of deformation from the original vertices
*
* @see #curve()
* @see #curveVertex()
*
*/
p.curveTightness = function(tightness) {
  // Store the tightness; it is consumed when the curve basis is rebuilt.
  curTightness = tightness;
};
/**
* The curveDetail() function sets the resolution at which curves display. The default value is 20.
* This function is only useful when using the P3D or OPENGL renderer.
*
* @param {int} detail resolution of the curves
*
* @see curve()
* @see curveVertex()
* @see curveTightness()
*/
p.curveDetail = function(detail) {
  // Store the resolution and rebuild the curve basis matrices immediately.
  curveDet = detail;
  curveInit();
};
/**
* Modifies the location from which rectangles draw. The default mode is rectMode(CORNER), which
* specifies the location to be the upper left corner of the shape and uses the third and fourth
* parameters of rect() to specify the width and height. The syntax rectMode(CORNERS) uses the
* first and second parameters of rect() to set the location of one corner and uses the third and
* fourth parameters to set the opposite corner. The syntax rectMode(CENTER) draws the image from
* its center point and uses the third and fourth parameters of rect() to specify the image's width
* and height. The syntax rectMode(RADIUS) draws the image from its center point and uses the third
* and fourth parameters of rect() to specify half of the image's width and height. The parameter must
* be written in ALL CAPS because Processing is a case sensitive language. Note: In version 125, the
* mode named CENTER_RADIUS was shortened to RADIUS.
*
* @param {MODE} MODE Either CORNER, CORNERS, CENTER, or RADIUS
*
* @see rect
*/
p.rectMode = function(aRectMode) {
  // Record how rect() should interpret its coordinate arguments.
  curRectMode = aRectMode;
};
/**
* Modifies the location from which images draw. The default mode is imageMode(CORNER), which specifies
* the location to be the upper left corner and uses the fourth and fifth parameters of image() to set
* the image's width and height. The syntax imageMode(CORNERS) uses the second and third parameters of
* image() to set the location of one corner of the image and uses the fourth and fifth parameters to
* set the opposite corner. Use imageMode(CENTER) to draw images centered at the given x and y position.
* The parameter to imageMode() must be written in ALL CAPS because Processing is a case sensitive language.
*
* @param {MODE} MODE Either CORNER, CORNERS, or CENTER
*
* @see loadImage
* @see PImage
* @see image
* @see background
*/
p.imageMode = function(mode) {
  // Select the coordinate-conversion helper used by image().
  if (mode === PConstants.CORNER) {
    imageModeConvert = imageModeCorner;
  } else if (mode === PConstants.CORNERS) {
    imageModeConvert = imageModeCorners;
  } else if (mode === PConstants.CENTER) {
    imageModeConvert = imageModeCenter;
  } else {
    throw "Invalid imageMode";
  }
};
/**
* The origin of the ellipse is modified by the ellipseMode() function. The default configuration is
* ellipseMode(CENTER), which specifies the location of the ellipse as the center of the shape. The RADIUS
* mode is the same, but the width and height parameters to ellipse() specify the radius of the ellipse,
* rather than the diameter. The CORNER mode draws the shape from the upper-left corner of its bounding box.
* The CORNERS mode uses the four parameters to ellipse() to set two opposing corners of the ellipse's bounding
* box. The parameter must be written in "ALL CAPS" because Processing is a case sensitive language.
*
* @param {MODE} MODE Either CENTER, RADIUS, CORNER, or CORNERS.
*
* @see ellipse
*/
p.ellipseMode = function(aEllipseMode) {
  // Record how ellipse() and arc() should interpret their arguments.
  curEllipseMode = aEllipseMode;
};
/**
* The arc() function draws an arc in the display window.
* Arcs are drawn along the outer edge of an ellipse defined by the
* x, y, width and height parameters.
* The origin or the arc's ellipse may be changed with the
* ellipseMode() function.
* The start and stop parameters specify the angles
* at which to draw the arc.
*
* @param {float} a x-coordinate of the arc's ellipse
* @param {float} b y-coordinate of the arc's ellipse
* @param {float} c width of the arc's ellipse
* @param {float} d height of the arc's ellipse
* @param {float} start angle to start the arc, specified in radians
* @param {float} stop angle to stop the arc, specified in radians
*
* @see #ellipseMode()
* @see #ellipse()
*/
p.arc = function(x, y, width, height, start, stop) {
  // Degenerate arcs (non-positive width or reversed angles) draw nothing.
  if (width <= 0 || stop < start) { return; }
  // XXX(jeresig)
  start = p.convertToRadians(start);
  stop = p.convertToRadians(stop);
  // Normalize (x, y, width, height) to CORNER semantics.
  if (curEllipseMode === PConstants.CORNERS) {
    width = width - x;
    height = height - y;
  } else if (curEllipseMode === PConstants.RADIUS) {
    x = x - width;
    y = y - height;
    width = width * 2;
    height = height * 2;
  } else if (curEllipseMode === PConstants.CENTER) {
    x = x - width/2;
    y = y - height/2;
  }
  // make sure that we're starting at a useful point
  while (start < 0) {
    start += PConstants.TWO_PI;
    stop += PConstants.TWO_PI;
  }
  // Clamp sweeps larger than a full revolution to one full circle.
  if (stop - start > PConstants.TWO_PI) {
    start = 0;
    stop = PConstants.TWO_PI;
  }
  var hr = width / 2;   // horizontal radius
  var vr = height / 2;  // vertical radius
  var centerX = x + hr;
  var centerY = y + vr;
  // XXX(jeresig): Removed * 2 from these lines
  // seems to have been a mistake.
  // Integer degree indices into the sin/cos lookup tables
  // (|0 truncates toward zero).
  var startLUT = 0 | (-0.5 + start * p.RAD_TO_DEG);
  var stopLUT = 0 | (0.5 + stop * p.RAD_TO_DEG);
  var i, j;
  if (doFill) {
    // shut off stroke for a minute
    var savedStroke = doStroke;
    doStroke = false;
    // Fill pass: a fan of vertices anchored at the center point.
    p.beginShape();
    p.vertex(centerX, centerY);
    for (i = startLUT; i <= stopLUT; i++) {
      j = i % PConstants.SINCOS_LENGTH;
      p.vertex(centerX + cosLUT[j] * hr, centerY + sinLUT[j] * vr);
    }
    p.endShape(PConstants.CLOSE);
    doStroke = savedStroke;
  }
  if (doStroke) {
    // Stroke pass traces only the outer edge of the arc
    // and doesn't include the first (center) vertex.
    var savedFill = doFill;
    doFill = false;
    p.beginShape();
    for (i = startLUT; i <= stopLUT; i++) {
      j = i % PConstants.SINCOS_LENGTH;
      p.vertex(centerX + cosLUT[j] * hr, centerY + sinLUT[j] * vr);
    }
    p.endShape();
    doFill = savedFill;
  }
};
/**
* Draws a line (a direct path between two points) to the screen. The version of line() with four parameters
* draws the line in 2D. To color a line, use the stroke() function. A line cannot be filled, therefore the
* fill() method will not affect the color of a line. 2D lines are drawn with a width of one pixel by default,
* but this can be changed with the strokeWeight() function. The version with six parameters allows the line
* to be placed anywhere within XYZ space. Drawing this shape in 3D using the z parameter requires the P3D or
* OPENGL parameter in combination with size.
*
* @param {int|float} x1 x-coordinate of the first point
* @param {int|float} y1 y-coordinate of the first point
* @param {int|float} z1 z-coordinate of the first point
* @param {int|float} x2 x-coordinate of the second point
* @param {int|float} y2 y-coordinate of the second point
* @param {int|float} z2 z-coordinate of the second point
*
* @see strokeWeight
* @see strokeJoin
* @see strokeCap
* @see beginShape
*/
Drawing2D.prototype.line = function(x1, y1, x2, y2) {
  // Nothing to draw when stroking is disabled.
  if (!doStroke) {
    return;
  }
  // Snap endpoints to whole pixels when smoothing is off.
  if (!renderSmooth) {
    x1 = Math.round(x1);
    x2 = Math.round(x2);
    y1 = Math.round(y1);
    y2 = Math.round(y2);
  }
  // A line is only defined if it has different start and end coordinates.
  // If they are the same, we call point instead.
  if (x1 === x2 && y1 === y2) {
    p.point(x1, y1);
    return;
  }
  var swap = undef,
      lineCap = undef,
      drawCrisp = true,
      currentModelView = modelView.array(),
      identityMatrix = [1, 0, 0, 0, 1, 0];
  // Test if any transformations have been applied to the sketch
  for (var i = 0; i < 6 && drawCrisp; i++) {
    drawCrisp = currentModelView[i] === identityMatrix[i];
  }
  /* Draw crisp lines if the line is vertical or horizontal with the following method
   * If any transformations have been applied to the sketch, don't make the line crisp
   * If the line is directed up or to the left, reverse it by swapping x1/x2 or y1/y2
   * Make the line 1 pixel longer to work around cross-platform canvas implementations
   * If the lineWidth is odd, translate the line by 0.5 in the perpendicular direction
   * Even lineWidths do not need to be translated because the canvas will draw them on pixel boundaries
   * Change the cap to butt-end to work around cross-platform canvas implementations
   * Reverse the translate and lineCap canvas state changes after drawing the line
   */
  if (drawCrisp) {
    if (x1 === x2) {
      // Vertical line: make it point downward.
      if (y1 > y2) {
        swap = y1;
        y1 = y2;
        y2 = swap;
      }
      y2++;
      if (lineWidth % 2 === 1) {
        curContext.translate(0.5, 0.0);
      }
    } else if (y1 === y2) {
      // Horizontal line: make it point rightward.
      if (x1 > x2) {
        swap = x1;
        x1 = x2;
        x2 = swap;
      }
      x2++;
      if (lineWidth % 2 === 1) {
        curContext.translate(0.0, 0.5);
      }
    }
    if (lineWidth === 1) {
      lineCap = curContext.lineCap;
      curContext.lineCap = 'butt';
    }
  }
  curContext.beginPath();
  // `|| 0` guards against NaN/undefined coordinates reaching the canvas.
  curContext.moveTo(x1 || 0, y1 || 0);
  curContext.lineTo(x2 || 0, y2 || 0);
  executeContextStroke();
  // Undo the canvas state changes made for crisp drawing above.
  if (drawCrisp) {
    if (x1 === x2 && lineWidth % 2 === 1) {
      curContext.translate(-0.5, 0.0);
    } else if (y1 === y2 && lineWidth % 2 === 1) {
      curContext.translate(0.0, -0.5);
    }
    if (lineWidth === 1) {
      curContext.lineCap = lineCap;
    }
  }
};
Drawing3D.prototype.line = function(x1, y1, z1, x2, y2, z2) {
  if (y2 === undef || z2 === undef) { // 2D line called in 3D context
    // Remap the four 2D arguments (x1, y1, x2, y2) — which arrived in the
    // slots (x1, y1, z1, x2) — onto the 3D parameters, with z set to 0.
    z2 = 0;
    y2 = x2;
    x2 = z1;
    z1 = 0;
  }
  // a line is only defined if it has different start and end coordinates.
  // If they are the same, we call point instead.
  if (x1===x2 && y1===y2 && z1===z2) {
    p.point(x1,y1,z1);
    return;
  }
  var lineVerts = [x1, y1, z1, x2, y2, z2];
  // Viewing transformation with Y flipped to match Processing's
  // screen coordinate system.
  var view = new PMatrix3D();
  view.scale(1, -1, 1);
  view.apply(modelView.array());
  view.transpose();
  if (lineWidth > 0 && doStroke) {
    // Lines are unlit, so they are rendered with the 2D program object.
    curContext.useProgram(programObject2D);
    uniformMatrix("model2d", programObject2D, "model", false, [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]);
    uniformMatrix("view2d", programObject2D, "view", false, view.array());
    uniformf("color2d", programObject2D, "color", strokeStyle);
    uniformi("picktype2d", programObject2D, "picktype", 0);
    vertexAttribPointer("vertex2d", programObject2D, "Vertex", 3, lineBuffer);
    disableVertexAttribPointer("aTextureCoord2d", programObject2D, "aTextureCoord");
    // Stream the two endpoints into the line buffer and draw.
    curContext.bufferData(curContext.ARRAY_BUFFER, new Float32Array(lineVerts), curContext.STREAM_DRAW);
    curContext.drawArrays(curContext.LINES, 0, 2);
  }
};
/**
* Draws a Bezier curve on the screen. These curves are defined by a series of anchor and control points. The first
* two parameters specify the first anchor point and the last two parameters specify the other anchor point. The
* middle parameters specify the control points which define the shape of the curve. Bezier curves were developed
* by French engineer Pierre Bezier. Using the 3D version of requires rendering with P3D or OPENGL (see the
* Environment reference for more information).
*
* @param {int | float} x1,y1,z1 coordinates for the first anchor point
* @param {int | float} cx1,cy1,cz1 coordinates for the first control point
* @param {int | float} cx2,cy2,cz2 coordinates for the second control point
* @param {int | float} x2,y2,z2 coordinates for the second anchor point
*
* @see bezierVertex
* @see curve
*/
Drawing2D.prototype.bezier = function() {
  // bezier(x1, y1, cx1, cy1, cx2, cy2, x2, y2)
  if (arguments.length !== 8) {
    throw("You must use 8 parameters for bezier() in 2D mode");
  }
  var a = arguments;
  p.beginShape();
  p.vertex(a[0], a[1]);
  p.bezierVertex(a[2], a[3], a[4], a[5], a[6], a[7]);
  p.endShape();
};
Drawing3D.prototype.bezier = function() {
  // bezier(x1, y1, z1, cx1, cy1, cz1, cx2, cy2, cz2, x2, y2, z2)
  if (arguments.length !== 12) {
    throw("You must use 12 parameters for bezier() in 3D mode");
  }
  var a = arguments;
  p.beginShape();
  p.vertex(a[0], a[1], a[2]);
  p.bezierVertex(a[3], a[4], a[5],
                 a[6], a[7], a[8],
                 a[9], a[10], a[11]);
  p.endShape();
};
/**
* Sets the resolution at which Beziers display. The default value is 20. This function is only useful when using the P3D
* or OPENGL renderer as the default (JAVA2D) renderer does not use this information.
*
* @param {int} detail resolution of the curves
*
* @see curve
* @see curveVertex
* @see curveTightness
*/
p.bezierDetail = function( detail ){
  // Store the subdivision count used when tessellating Bezier curves.
  bezDetail = detail;
};
/**
* The bezierPoint() function evaluates the cubic Bezier curve at point t for points a, b, c, d.
* The parameter t varies between 0 and 1. The a and d parameters are the
* on-curve points, b and c are the control points. To make a two-dimensional
* curve, call this function once with the x coordinates and a second time
* with the y coordinates to get the location of a bezier curve at t.
*
* @param {float} a coordinate of first point on the curve
* @param {float} b coordinate of first control point
* @param {float} c coordinate of second control point
* @param {float} d coordinate of second point on the curve
* @param {float} t value between 0 and 1
*
* @see #bezier()
* @see #bezierVertex()
* @see #curvePoint()
*/
p.bezierPoint = function(a, b, c, d, t) {
  // Cubic Bernstein basis: u^3*a + 3u^2t*b + 3ut^2*c + t^3*d, u = 1 - t.
  var u = 1 - t;
  return u * u * u * a + 3 * u * u * t * b + 3 * u * t * t * c + t * t * t * d;
};
/**
* The bezierTangent() function calculates the tangent of a point on a Bezier curve. There is a good
* definition of "tangent" at Wikipedia: http://en.wikipedia.org/wiki/Tangent
*
* @param {float} a coordinate of first point on the curve
* @param {float} b coordinate of first control point
* @param {float} c coordinate of second control point
* @param {float} d coordinate of second point on the curve
* @param {float} t value between 0 and 1
*
* @see #bezier()
* @see #bezierVertex()
* @see #curvePoint()
*/
p.bezierTangent = function(a, b, c, d, t) {
  // Derivative of the cubic Bezier, split into its three polynomial terms.
  var quadratic = 3 * t * t * (-a + 3 * b - 3 * c + d);
  var linear = 6 * t * (a - 2 * b + c);
  var constant = 3 * (-a + b);
  return quadratic + linear + constant;
};
/**
* The curvePoint() function evaluates the Catmull-Rom curve at point t for points a, b, c, d. The
* parameter t varies between 0 and 1, a and d are points on the curve,
* and b and c are the control points. This can be done once with the x
* coordinates and a second time with the y coordinates to get the
* location of a curve at t.
*
* @param {int|float} a coordinate of first point on the curve
* @param {int|float} b coordinate of second point on the curve
* @param {int|float} c coordinate of third point on the curve
* @param {int|float} d coordinate of fourth point on the curve
* @param {float} t value between 0 and 1
*
* @see #curve()
* @see #curveVertex()
* @see #bezierPoint()
*/
p.curvePoint = function(a, b, c, d, t) {
  // Catmull-Rom polynomial, written term by term in ascending powers of t.
  var c0 = (2 * b);
  var c1 = (-a + c) * t;
  var c2 = (2 * a - 5 * b + 4 * c - d) * t * t;
  var c3 = (-a + 3 * b - 3 * c + d) * t * t * t;
  return 0.5 * (c0 + c1 + c2 + c3);
};
/**
* The curveTangent() function calculates the tangent of a point on a Catmull-Rom curve.
* There is a good definition of "tangent" at Wikipedia: http://en.wikipedia.org/wiki/Tangent.
*
* @param {int|float} a coordinate of first point on the curve
* @param {int|float} b coordinate of first control point
* @param {int|float} c coordinate of second control point
* @param {int|float} d coordinate of second point on the curve
* @param {float} t value between 0 and 1
*
* @see #curve()
* @see #curveVertex()
* @see #curvePoint()
* @see #bezierTangent()
*/
p.curveTangent = function(a, b, c, d, t) {
  // Derivative of the Catmull-Rom polynomial, term by term.
  var c0 = (-a + c);
  var c1 = 2 * (2 * a - 5 * b + 4 * c - d) * t;
  var c2 = 3 * (-a + 3 * b - 3 * c + d) * t * t;
  return 0.5 * (c0 + c1 + c2);
};
/**
* A triangle is a plane created by connecting three points. The first two arguments specify the first point,
* the middle two arguments specify the second point, and the last two arguments specify the third point.
*
* @param {int | float} x1 x-coordinate of the first point
* @param {int | float} y1 y-coordinate of the first point
* @param {int | float} x2 x-coordinate of the second point
* @param {int | float} y2 y-coordinate of the second point
* @param {int | float} x3 x-coordinate of the third point
* @param {int | float} y3 y-coordinate of the third point
*/
p.triangle = function(x1, y1, x2, y2, x3, y3) {
  // Emit the three corners as a single TRIANGLES shape on the z=0 plane.
  var coords = [x1, y1, x2, y2, x3, y3];
  p.beginShape(PConstants.TRIANGLES);
  for (var i = 0; i < 6; i += 2) {
    p.vertex(coords[i], coords[i + 1], 0);
  }
  p.endShape();
};
/**
* A quad is a quadrilateral, a four sided polygon. It is similar to a rectangle, but the angles between its
* edges are not constrained to ninety degrees. The first pair of parameters (x1,y1) sets the first vertex
* and the subsequent pairs should proceed clockwise or counter-clockwise around the defined shape.
*
* @param {float | int} x1 x-coordinate of the first corner
* @param {float | int} y1 y-coordinate of the first corner
* @param {float | int} x2 x-coordinate of the second corner
* @param {float | int} y2 y-coordinate of the second corner
* @param {float | int} x3 x-coordinate of the third corner
* @param {float | int} y3 y-coordinate of the third corner
* @param {float | int} x4 x-coordinate of the fourth corner
* @param {float | int} y4 y-coordinate of the fourth corner
*/
p.quad = function(x1, y1, x2, y2, x3, y3, x4, y4) {
  // Emit the four corners as a single QUADS shape on the z=0 plane.
  var coords = [x1, y1, x2, y2, x3, y3, x4, y4];
  p.beginShape(PConstants.QUADS);
  for (var i = 0; i < 8; i += 2) {
    p.vertex(coords[i], coords[i + 1], 0);
  }
  p.endShape();
};
var roundedRect$2d = function(x, y, width, height, tl, tr, br, bl) {
  // A single radius argument applies to all four corners.
  if (bl === undef) {
    tr = tl;
    br = tl;
    bl = tl;
  }
  // Clamp every radius so opposite arcs can never overlap.
  var cap = Math.min(width / 2, height / 2);
  tl = Math.min(tl, cap);
  tr = Math.min(tr, cap);
  br = Math.min(br, cap);
  bl = Math.min(bl, cap);
  // Translate the stroke by (0.5, 0.5) to draw a crisp border
  var crisp = !doFill || doStroke;
  if (crisp) {
    curContext.translate(0.5, 0.5);
  }
  // Trace the outline clockwise from the top-left corner, joining the
  // straight edges with quadratic arcs at each corner.
  curContext.beginPath();
  curContext.moveTo(x + tl, y);
  curContext.lineTo(x + width - tr, y);
  curContext.quadraticCurveTo(x + width, y, x + width, y + tr);
  curContext.lineTo(x + width, y + height - br);
  curContext.quadraticCurveTo(x + width, y + height, x + width - br, y + height);
  curContext.lineTo(x + bl, y + height);
  curContext.quadraticCurveTo(x, y + height, x, y + height - bl);
  curContext.lineTo(x, y + tl);
  curContext.quadraticCurveTo(x, y, x + tl, y);
  if (crisp) {
    curContext.translate(-0.5, -0.5);
  }
  executeContextFill();
  executeContextStroke();
};
/**
* Draws a rectangle to the screen. A rectangle is a four-sided shape with every angle at ninety
* degrees. The first two parameters set the location, the third sets the width, and the fourth
* sets the height. The origin is changed with the rectMode() function.
*
* @param {int|float} x x-coordinate of the rectangle
* @param {int|float} y y-coordinate of the rectangle
* @param {int|float} width width of the rectangle
* @param {int|float} height height of the rectangle
*
* @see rectMode
* @see quad
*/
Drawing2D.prototype.rect = function(x, y, width, height, tl, tr, br, bl) {
  if (!width && !height) {
    return;
  }
  // Normalize (x, y, width, height) to CORNER semantics.
  if (curRectMode === PConstants.CORNERS) {
    width -= x;
    height -= y;
  } else if (curRectMode === PConstants.RADIUS) {
    width *= 2;
    height *= 2;
    x -= width / 2;
    y -= height / 2;
  } else if (curRectMode === PConstants.CENTER) {
    x -= width / 2;
    y -= height / 2;
  }
  // Snap to whole pixels when smoothing is off.
  if (!renderSmooth) {
    x = Math.round(x);
    y = Math.round(y);
    width = Math.round(width);
    height = Math.round(height);
  }
  // Rounded corners are delegated to the canvas-path helper.
  if (tl !== undef) {
    roundedRect$2d(x, y, width, height, tl, tr, br, bl);
    return;
  }
  // Translate the line by (0.5, 0.5) to draw a crisp rectangle border
  var crisp = doStroke && lineWidth % 2 === 1;
  if (crisp) {
    curContext.translate(0.5, 0.5);
  }
  curContext.beginPath();
  curContext.rect(x, y, width, height);
  executeContextFill();
  executeContextStroke();
  if (crisp) {
    curContext.translate(-0.5, -0.5);
  }
};
Drawing3D.prototype.rect = function(x, y, width, height, tl, tr, br, bl) {
  if (tl !== undef) {
    throw "rect() with rounded corners is not supported in 3D mode";
  }
  // Normalize (x, y, width, height) to CORNER semantics.
  if (curRectMode === PConstants.CORNERS) {
    width -= x;
    height -= y;
  } else if (curRectMode === PConstants.RADIUS) {
    width *= 2;
    height *= 2;
    x -= width / 2;
    y -= height / 2;
  } else if (curRectMode === PConstants.CENTER) {
    x -= width / 2;
    y -= height / 2;
  }
  // Modeling transformation: scale a unit rectangle to the requested size.
  var model = new PMatrix3D();
  model.translate(x, y, 0);
  model.scale(width, height, 1);
  model.transpose();
  // viewing transformation needs to have Y flipped
  // because that's what Processing does.
  var view = new PMatrix3D();
  view.scale(1, -1, 1);
  view.apply(modelView.array());
  view.transpose();
  // Stroke pass: outline drawn unlit with the 2D program object.
  if (lineWidth > 0 && doStroke) {
    curContext.useProgram(programObject2D);
    uniformMatrix("model2d", programObject2D, "model", false, model.array());
    uniformMatrix("view2d", programObject2D, "view", false, view.array());
    uniformf("color2d", programObject2D, "color", strokeStyle);
    uniformi("picktype2d", programObject2D, "picktype", 0);
    vertexAttribPointer("vertex2d", programObject2D, "Vertex", 3, rectBuffer);
    disableVertexAttribPointer("aTextureCoord2d", programObject2D, "aTextureCoord");
    curContext.drawArrays(curContext.LINE_LOOP, 0, rectVerts.length / 3);
  }
  // Fill pass: interior drawn (possibly lit) with the 3D program object.
  if (doFill) {
    curContext.useProgram(programObject3D);
    uniformMatrix("model3d", programObject3D, "model", false, model.array());
    uniformMatrix("view3d", programObject3D, "view", false, view.array());
    // fix stitching problems. (lines get occluded by triangles
    // since they share the same depth values). This is not entirely
    // working, but it's a start for drawing the outline. So
    // developers can start playing around with styles.
    curContext.enable(curContext.POLYGON_OFFSET_FILL);
    curContext.polygonOffset(1, 1);
    uniformf("color3d", programObject3D, "color", fillStyle);
    if(lightCount > 0){
      // Lights are on: supply the inverse-transpose of modelview so
      // normals are transformed correctly.
      var v = new PMatrix3D();
      v.set(view);
      var m = new PMatrix3D();
      m.set(model);
      v.mult(m);
      var normalMatrix = new PMatrix3D();
      normalMatrix.set(v);
      normalMatrix.invert();
      normalMatrix.transpose();
      uniformMatrix("normalTransform3d", programObject3D, "normalTransform", false, normalMatrix.array());
      vertexAttribPointer("normal3d", programObject3D, "Normal", 3, rectNormBuffer);
    }
    else{
      disableVertexAttribPointer("normal3d", programObject3D, "Normal");
    }
    vertexAttribPointer("vertex3d", programObject3D, "Vertex", 3, rectBuffer);
    curContext.drawArrays(curContext.TRIANGLE_FAN, 0, rectVerts.length / 3);
    curContext.disable(curContext.POLYGON_OFFSET_FILL);
  }
};
/**
* Draws an ellipse (oval) in the display window. An ellipse with an equal width and height is a circle.
* The first two parameters set the location, the third sets the width, and the fourth sets the height. The origin may be
* changed with the ellipseMode() function.
*
* @param {float|int} x x-coordinate of the ellipse
* @param {float|int} y y-coordinate of the ellipse
* @param {float|int} width width of the ellipse
* @param {float|int} height height of the ellipse
*
* @see ellipseMode
*/
Drawing2D.prototype.ellipse = function(x, y, width, height) {
  x = x || 0;
  y = y || 0;
  if (width <= 0 && height <= 0) {
    return;
  }
  // Normalize so that (x, y) is the center and width/height are diameters.
  if (curEllipseMode === PConstants.RADIUS) {
    width *= 2;
    height *= 2;
  } else if (curEllipseMode === PConstants.CORNERS) {
    width -= x;
    height -= y;
    x += width / 2;
    y += height / 2;
  } else if (curEllipseMode === PConstants.CORNER) {
    x += width / 2;
    y += height / 2;
  }
  // Shortcut for drawing a 2D circle
  if (width === height) {
    curContext.beginPath();
    curContext.arc(x, y, width / 2, 0, PConstants.TWO_PI, false);
    executeContextFill();
    executeContextStroke();
    return;
  }
  // General ellipse: approximate with four cubic Bezier quadrants.
  // kappa is the standard circle-approximation constant 4*(sqrt(2)-1)/3.
  var rx = width / 2,
      ry = height / 2,
      kappa = 0.5522847498307933,
      ox = kappa * rx,
      oy = kappa * ry;
  p.beginShape();
  p.vertex(x + rx, y);
  p.bezierVertex(x + rx, y - oy, x + ox, y - ry, x, y - ry);
  p.bezierVertex(x - ox, y - ry, x - rx, y - oy, x - rx, y);
  p.bezierVertex(x - rx, y + oy, x - ox, y + ry, x, y + ry);
  p.bezierVertex(x + ox, y + ry, x + rx, y + oy, x + rx, y);
  p.endShape();
};
Drawing3D.prototype.ellipse = function(x, y, width, height) {
  x = x || 0;
  y = y || 0;
  if (width <= 0 && height <= 0) {
    return;
  }
  // Normalize so that (x, y) is the center and width/height are diameters.
  if (curEllipseMode === PConstants.RADIUS) {
    width *= 2;
    height *= 2;
  } else if (curEllipseMode === PConstants.CORNERS) {
    width = width - x;
    height = height - y;
    x += width / 2;
    y += height / 2;
  } else if (curEllipseMode === PConstants.CORNER) {
    x += width / 2;
    y += height / 2;
  }
  // Approximate the ellipse outline with four cubic Bezier quadrants;
  // C is the standard circle-approximation constant 4*(sqrt(2)-1)/3.
  var w = width / 2,
      h = height / 2,
      C = 0.5522847498307933,
      c_x = C * w,
      c_y = C * h;
  p.beginShape();
  p.vertex(x + w, y);
  p.bezierVertex(x + w, y - c_y, 0, x + c_x, y - h, 0, x, y - h, 0);
  p.bezierVertex(x - c_x, y - h, 0, x - w, y - c_y, 0, x - w, y, 0);
  p.bezierVertex(x - w, y + c_y, 0, x - c_x, y + h, 0, x, y + h, 0);
  p.bezierVertex(x + c_x, y + h, 0, x + w, y + c_y, 0, x + w, y, 0);
  p.endShape();
  if (doFill) {
    //temporary workaround to not working fills for bezier -- will fix later
    // Fill as a triangle fan anchored at the average of the outline
    // vertices that endShape() left in vertArray.
    var xAv = 0, yAv = 0, i, j;
    for (i = 0; i < vertArray.length; i++) {
      xAv += vertArray[i][0];
      yAv += vertArray[i][1];
    }
    xAv /= vertArray.length;
    yAv /= vertArray.length;
    // Build the fan's center vertex in the same 16-slot layout that
    // vertArray entries use: position, texture coords, fill color,
    // stroke color, then the current normal.
    var vert = [],
      fillVertArray = [],
      colorVertArray = [];
    vert[0] = xAv;
    vert[1] = yAv;
    vert[2] = 0;
    vert[3] = 0;
    vert[4] = 0;
    vert[5] = fillStyle[0];
    vert[6] = fillStyle[1];
    vert[7] = fillStyle[2];
    vert[8] = fillStyle[3];
    vert[9] = strokeStyle[0];
    vert[10] = strokeStyle[1];
    vert[11] = strokeStyle[2];
    vert[12] = strokeStyle[3];
    vert[13] = normalX;
    vert[14] = normalY;
    vert[15] = normalZ;
    vertArray.unshift(vert);
    // Flatten positions (slots 0-2) and fill colors (slots 5-8) into
    // the parallel arrays fill3D() expects.
    for (i = 0; i < vertArray.length; i++) {
      for (j = 0; j < 3; j++) {
        fillVertArray.push(vertArray[i][j]);
      }
      for (j = 5; j < 9; j++) {
        colorVertArray.push(vertArray[i][j]);
      }
    }
    fill3D(fillVertArray, "TRIANGLE_FAN", colorVertArray);
  }
};
/**
* Sets the current normal vector. This is for drawing three dimensional shapes and surfaces and
* specifies a vector perpendicular to the surface of the shape which determines how lighting affects
* it. Processing attempts to automatically assign normals to shapes, but since that's imperfect,
* this is a better option when you want more control. This function is identical to glNormal3f() in OpenGL.
*
* @param {float} nx x direction
* @param {float} ny y direction
* @param {float} nz z direction
*
* @see beginShape
* @see endShape
* @see lights
*/
p.normal = function(nx, ny, nz) {
  // Require exactly three numeric components.
  var allNumeric = typeof nx === "number" && typeof ny === "number" && typeof nz === "number";
  if (arguments.length !== 3 || !allNumeric) {
    throw "normal() requires three numeric arguments.";
  }
  normalX = nx;
  normalY = ny;
  normalZ = nz;
  // Outside of beginShape()/endShape() the normal mode is left untouched.
  if (curShape === 0) {
    return;
  }
  // Inside a shape, the first explicit normal switches AUTO -> SHAPE,
  // and a second one switches SHAPE -> VERTEX.
  if (normalMode === PConstants.NORMAL_MODE_AUTO) {
    normalMode = PConstants.NORMAL_MODE_SHAPE;
  } else if (normalMode === PConstants.NORMAL_MODE_SHAPE) {
    normalMode = PConstants.NORMAL_MODE_VERTEX;
  }
};
////////////////////////////////////////////////////////////////////////////
// Raster drawing functions
////////////////////////////////////////////////////////////////////////////
/**
* Saves an image from the display window. Images are saved in TIFF, TARGA, JPEG, and PNG format
* depending on the extension within the filename parameter. For example, "image.tif" will have
* a TIFF image and "image.png" will save a PNG image. If no extension is included in the filename,
* the image will save in TIFF format and .tif will be added to the name. These files are saved to
* the sketch's folder, which may be opened by selecting "Show sketch folder" from the "Sketch" menu.
* It is not possible to use save() while running the program in a web browser. All images saved
* from the main drawing window will be opaque. To save images without a background, use createGraphics().
*
* @param {String} filename any sequence of letters and numbers
*
* @see saveFrame
* @see createGraphics
*/
p.save = function(file, img) {
  // file is unused at the moment
  // may implement this differently in later release
  // Open the requested image (or the sketch canvas) as a data URL
  // in a new browser tab.
  var source = (img !== undef) ? img : p.externals.canvas;
  return window.open(source.toDataURL(), "_blank");
};
var saveNumber = 0; // running frame counter shared by saveFrame() calls
p.saveFrame = function(file) {
  // Default to a zero-padded name template when none is given.
  if (file === undef) {
    file = "screen-####.png";
  }
  // Replace the first run of '#' characters with the zero-padded
  // counter: screen-0000.png, screen-0001.png, ...
  var frameFilename = file.replace(/#+/, function(all) {
    var digits = String(saveNumber++);
    var padLength = Math.max(0, all.length - digits.length);
    return new Array(padLength + 1).join("0") + digits;
  });
  p.save(frameFilename);
};
// Scratch 2D context shared by the raster helpers below.
var utilityContext2d = document.createElement("canvas").getContext("2d");
// Rotating pool of reusable scratch canvases for getCanvasData().
var canvasDataCache = [undef, undef, undef]; // we need three for now
function getCanvasData(obj, w, h) {
  // Rotate through the small pool of scratch canvases so repeated calls
  // reuse DOM elements instead of allocating new ones each time.
  var canvasData = canvasDataCache.shift();
  if (canvasData === undef) {
    var scratch = document.createElement("canvas");
    canvasData = {
      canvas: scratch,
      context: scratch.getContext('2d')
    };
  }
  canvasDataCache.push(canvasData);
  var canvas = canvasData.canvas,
      context = canvasData.context,
      width = w || obj.width,
      height = h || obj.height;
  canvas.width = width;
  canvas.height = height;
  if (!obj) {
    context.clearRect(0, 0, width, height);
  } else if ("data" in obj) { // ImageData: blit raw pixels directly
    context.putImageData(obj, 0, 0);
  } else {
    // Canvas/Image element: clear, then scale-draw into the scratch canvas.
    context.clearRect(0, 0, width, height);
    context.drawImage(obj, 0, 0, width, height);
  }
  return canvasData;
}
/**
* Handle the sketch code for pixels[] and pixels.length
* parser code converts pixels[] to getPixels()
* or setPixels(), .length becomes getLength()
*/
/**
 * Builds the accessor object that backs a sketch's pixels[] usage for a
 * PImage. The parser rewrites pixels[i] to getPixel(i)/setPixel(i, c)
 * and pixels.length to getLength().
 *
 * All accessors throw a descriptive error for remote images, whose pixel
 * data is unavailable; the check now runs BEFORE touching imageData so a
 * remote image raises the intended message rather than a TypeError.
 *
 * @param {PImage} aImg the image whose pixel buffer is exposed
 * @returns {Object} the pixels[] accessor object
 */
function buildPixelsObject(aImg) {
  // aImg is captured by closure; the original per-method IIFE wrappers
  // were redundant and have been removed.
  return {
    getLength: function() {
      if (aImg.isRemote) {
        throw "Image is loaded remotely. Cannot get length.";
      }
      return aImg.imageData.data.length ? aImg.imageData.data.length/4 : 0;
    },
    getPixel: function(i) {
      if (aImg.isRemote) {
        throw "Image is loaded remotely. Cannot get pixels.";
      }
      var offset = i*4,
          data = aImg.imageData.data;
      // Pack RGBA bytes into a single ARGB int.
      return (data[offset+3] << 24) & PConstants.ALPHA_MASK |
             (data[offset] << 16) & PConstants.RED_MASK |
             (data[offset+1] << 8) & PConstants.GREEN_MASK |
             data[offset+2] & PConstants.BLUE_MASK;
    },
    setPixel: function(i, c) {
      if (aImg.isRemote) {
        throw "Image is loaded remotely. Cannot set pixel.";
      }
      var offset = i*4,
          data = aImg.imageData.data;
      // Unpack the ARGB int into the RGBA byte layout of ImageData.
      data[offset+0] = (c & PConstants.RED_MASK) >>> 16;
      data[offset+1] = (c & PConstants.GREEN_MASK) >>> 8;
      data[offset+2] = (c & PConstants.BLUE_MASK);
      data[offset+3] = (c & PConstants.ALPHA_MASK) >>> 24;
      aImg.__isDirty = true;
    },
    toArray: function() {
      if (aImg.isRemote) {
        throw "Image is loaded remotely. Cannot get pixels.";
      }
      var arr = [],
          data = aImg.imageData.data,
          length = aImg.width * aImg.height;
      for (var i = 0, offset = 0; i < length; i++, offset += 4) {
        arr.push( (data[offset+3] << 24) & PConstants.ALPHA_MASK |
                  (data[offset] << 16) & PConstants.RED_MASK |
                  (data[offset+1] << 8) & PConstants.GREEN_MASK |
                  data[offset+2] & PConstants.BLUE_MASK );
      }
      return arr;
    },
    set: function(arr) {
      // Fixed: previously tested this.isRemote, but "this" is the pixels
      // accessor object itself, which never carries an isRemote flag.
      if (aImg.isRemote) {
        throw "Image is loaded remotely. Cannot set pixels.";
      }
      var data = aImg.imageData.data;
      for (var i = 0, aL = arr.length; i < aL; i++) {
        var c = arr[i];
        var offset = i*4;
        data[offset+0] = (c & PConstants.RED_MASK) >>> 16;
        data[offset+1] = (c & PConstants.GREEN_MASK) >>> 8;
        data[offset+2] = (c & PConstants.BLUE_MASK);
        data[offset+3] = (c & PConstants.ALPHA_MASK) >>> 24;
      }
      aImg.__isDirty = true;
    }
  };
}
/**
* Datatype for storing images. Processing can display .gif, .jpg, .tga, and .png images. Images may be
* displayed in 2D and 3D space. Before an image is used, it must be loaded with the loadImage() function.
* The PImage object contains fields for the width and height of the image, as well as an array called
* pixels[] which contains the values for every pixel in the image. A group of methods, described below,
* allow easy access to the image's pixels and alpha channel and simplify the process of compositing.
* Before using the pixels[] array, be sure to use the loadPixels() method on the image to make sure that the
* pixel data is properly loaded. To create a new image, use the createImage() function (do not use new PImage()).
*
* @param {int} width image width
* @param {int} height image height
* @param {MODE} format Either RGB, ARGB, ALPHA (grayscale alpha channel)
*
* @returns {PImage}
*
* @see loadImage
* @see imageMode
* @see createImage
*/
/**
 * PImage constructor. Three call shapes are supported:
 *  - new PImage(imgElement): wrap an existing HTMLImageElement.
 *  - new PImage(w, h, format): create a blank canvas-backed image.
 *  - new PImage(): create an empty 0x0 ARGB placeholder (filled in
 *    later, e.g. by an async loadImage()).
 */
var PImage = function(aWidth, aHeight, aFormat) {
  // Keep track of whether or not the cached imageData has been touched.
  this.__isDirty = false;
  if (aWidth instanceof HTMLImageElement) {
    // convert an <img> element to a PImage
    this.fromHTMLImageData(aWidth);
  } else if (aHeight || aFormat) {
    this.width = aWidth || 1;
    this.height = aHeight || 1;
    // Stuff a canvas into sourceImg so image() calls can use drawImage like an <img>
    var canvas = this.sourceImg = document.createElement("canvas");
    canvas.width = this.width;
    canvas.height = this.height;
    //XXX(jeresig): Commenting out imageData stuff
    //var imageData = this.imageData = canvas.getContext('2d').createImageData(this.width, this.height);
    // Unknown formats fall back to RGB.
    this.format = (aFormat === PConstants.ARGB || aFormat === PConstants.ALPHA) ? aFormat : PConstants.RGB;
    //if (this.format === PConstants.RGB) {
      // Set the alpha channel of an RGB image to opaque.
      //for (var i = 3, data = this.imageData.data, len = data.length; i < len; i += 4) {
        //data[i] = 255;
      //}
    //}
    //this.__isDirty = true;
    //this.updatePixels();
  } else {
    this.width = 0;
    this.height = 0;
    //XXX(jeresig): Commenting out imageData stuff
    //this.imageData = utilityContext2d.createImageData(1, 1);
    this.format = PConstants.ARGB;
  }
  //XXX(jeresig): Commenting out imageData stuff
  //this.pixels = buildPixelsObject(this);
};
PImage.prototype = {
  /**
   * Temporary hack to deal with cross-Processing-instance created PImage. See
   * tickets #1623 and #1644.
   */
  __isPImage: true,
  /**
   * @member PImage
   * Updates the image with the data in its pixels[] array. Use in conjunction with loadPixels(). If
   * you're only reading pixels from the array, there's no need to call updatePixels().
   * Certain renderers may or may not seem to require loadPixels() or updatePixels(). However, the rule
   * is that any time you want to manipulate the pixels[] array, you must first call loadPixels(), and
   * after changes have been made, call updatePixels(). Even if the renderer may not seem to use this
   * function in the current Processing release, this will always be subject to change.
   * Currently, none of the renderers use the additional parameters to updatePixels().
   */
  updatePixels: function() {
    var canvas = this.sourceImg;
    // Only canvas-backed images with pending pixel writes need repainting;
    // <img>-backed (remote) images cannot be written to.
    if (canvas && canvas instanceof HTMLCanvasElement && this.__isDirty) {
      canvas.getContext('2d').putImageData(this.imageData, 0, 0);
    }
    this.__isDirty = false;
  },
  /**
   * @member PImage
   * Initializes this PImage from an HTMLImageElement, keeping the element
   * as the drawing source. The image is flagged isRemote, meaning its
   * pixel data cannot be read — only drawn.
   */
  fromHTMLImageData: function(htmlImg) {
    // convert an <img> element to a PImage
    var canvasData = getCanvasData(htmlImg);
    //XXX(jeresig): Commenting out imageData stuff
    //try {
      //var imageData = canvasData.context.getImageData(0, 0, htmlImg.width, htmlImg.height);
      //this.fromImageData(imageData);
    //} catch(e) {
      if (htmlImg.width && htmlImg.height) {
        this.isRemote = true;
        this.width = htmlImg.width;
        this.height = htmlImg.height;
      }
    //}
    this.sourceImg = htmlImg;
  },
  /**
   * @member PImage
   * Reads a pixel or region of this image by delegating to p.get():
   * no arguments returns a copy of the whole image, (x, y) returns a
   * single color value, (x, y, w, h) returns a region as a new PImage.
   * Other argument counts return undefined.
   */
  'get': function(x, y, w, h) {
    if (!arguments.length) {
      return p.get(this);
    }
    if (arguments.length === 2) {
      return p.get(x, y, this);
    }
    if (arguments.length === 4) {
      return p.get(x, y, w, h, this);
    }
  },
  /**
   * @member PImage
   * Changes the color of any pixel or writes an image directly into the image. The x and y parameter
   * specify the pixel or the upper-left corner of the image. The color parameter specifies the color value.
   * Setting the color of a single pixel with set(x, y) is easy, but not as fast as putting the data
   * directly into pixels[]. The equivalent statement to "set(x, y, #000000)" using pixels[] is
   * "pixels[y*width+x] = #000000". Processing requires calling loadPixels() to load the display window
   * data into the pixels[] array before getting the values and calling updatePixels() to update the window.
   *
   * @param {int} x x-coordinate of the pixel or upper-left corner of the image
   * @param {int} y y-coordinate of the pixel or upper-left corner of the image
   * @param {color} color any value of the color datatype
   *
   * @see get
   * @see pixels[]
   * @see copy
   */
  'set': function(x, y, c) {
    p.set(x, y, c, this);
    this.__isDirty = true;
  },
  /**
   * @member PImage
   * Blends a region of pixels into the image specified by the img parameter. These copies utilize full
   * alpha channel support and a choice of the following modes to blend the colors of source pixels (A)
   * with the ones of pixels in the destination image (B):
   * BLEND - linear interpolation of colours: C = A*factor + B
   * ADD - additive blending with white clip: C = min(A*factor + B, 255)
   * SUBTRACT - subtractive blending with black clip: C = max(B - A*factor, 0)
   * DARKEST - only the darkest colour succeeds: C = min(A*factor, B)
   * LIGHTEST - only the lightest colour succeeds: C = max(A*factor, B)
   * DIFFERENCE - subtract colors from underlying image.
   * EXCLUSION - similar to DIFFERENCE, but less extreme.
   * MULTIPLY - Multiply the colors, result will always be darker.
   * SCREEN - Opposite multiply, uses inverse values of the colors.
   * OVERLAY - A mix of MULTIPLY and SCREEN. Multiplies dark values, and screens light values.
   * HARD_LIGHT - SCREEN when greater than 50% gray, MULTIPLY when lower.
   * SOFT_LIGHT - Mix of DARKEST and LIGHTEST. Works like OVERLAY, but not as harsh.
   * DODGE - Lightens light tones and increases contrast, ignores darks. Called "Color Dodge" in Illustrator and Photoshop.
   * BURN - Darker areas are applied, increasing contrast, ignores lights. Called "Color Burn" in Illustrator and Photoshop.
   * All modes use the alpha information (highest byte) of source image pixels as the blending factor.
   * If the source and destination regions are different sizes, the image will be automatically resized to
   * match the destination size. If the srcImg parameter is not used, the display window is used as the source image.
   * This function ignores imageMode().
   *
   * @param {int} x X coordinate of the source's upper left corner
   * @param {int} y Y coordinate of the source's upper left corner
   * @param {int} width source image width
   * @param {int} height source image height
   * @param {int} dx X coordinate of the destinations's upper left corner
   * @param {int} dy Y coordinate of the destinations's upper left corner
   * @param {int} dwidth destination image width
   * @param {int} dheight destination image height
   * @param {PImage} srcImg an image variable referring to the source image
   * @param {MODE} MODE Either BLEND, ADD, SUBTRACT, LIGHTEST, DARKEST, DIFFERENCE, EXCLUSION,
   * MULTIPLY, SCREEN, OVERLAY, HARD_LIGHT, SOFT_LIGHT, DODGE, BURN
   *
   * @see alpha
   * @see copy
   */
  blend: function(srcImg, x, y, width, height, dx, dy, dwidth, dheight, MODE) {
    // 9-arg form: source region comes from this image itself.
    if (arguments.length === 9) {
      p.blend(this, srcImg, x, y, width, height, dx, dy, dwidth, dheight, this);
    } else if (arguments.length === 10) {
      p.blend(srcImg, x, y, width, height, dx, dy, dwidth, dheight, MODE, this);
    }
    // NOTE(review): presumably drops the cached source so later draws use
    // the blended pixel data rather than the stale source — confirm.
    delete this.sourceImg;
  },
  /**
   * @member PImage
   * Copies a region of pixels from one image into another. If the source and destination regions
   * aren't the same size, it will automatically resize source pixels to fit the specified target region.
   * No alpha information is used in the process, however if the source image has an alpha channel set,
   * it will be copied as well. This function ignores imageMode().
   *
   * @param {int} sx X coordinate of the source's upper left corner
   * @param {int} sy Y coordinate of the source's upper left corner
   * @param {int} swidth source image width
   * @param {int} sheight source image height
   * @param {int} dx X coordinate of the destinations's upper left corner
   * @param {int} dy Y coordinate of the destinations's upper left corner
   * @param {int} dwidth destination image width
   * @param {int} dheight destination image height
   * @param {PImage} srcImg an image variable referring to the source image
   *
   * @see alpha
   * @see blend
   */
  copy: function(srcImg, sx, sy, swidth, sheight, dx, dy, dwidth, dheight) {
    // copy() is blend() with the REPLACE mode.
    if (arguments.length === 8) {
      p.blend(this, srcImg, sx, sy, swidth, sheight, dx, dy, dwidth, PConstants.REPLACE, this);
    } else if (arguments.length === 9) {
      p.blend(srcImg, sx, sy, swidth, sheight, dx, dy, dwidth, dheight, PConstants.REPLACE, this);
    }
    // NOTE(review): see blend() — presumably invalidates the cached source.
    delete this.sourceImg;
  },
  /**
   * @member PImage
   * Filters an image as defined by one of the following modes:
   * THRESHOLD - converts the image to black and white pixels depending if they are above or below
   * the threshold defined by the level parameter. The level must be between 0.0 (black) and 1.0(white).
   * If no level is specified, 0.5 is used.
   * GRAY - converts any colors in the image to grayscale equivalents
   * INVERT - sets each pixel to its inverse value
   * POSTERIZE - limits each channel of the image to the number of colors specified as the level parameter
   * BLUR - executes a Gaussian blur with the level parameter specifying the extent of the blurring.
   * If no level parameter is used, the blur is equivalent to Gaussian blur of radius 1.
   * OPAQUE - sets the alpha channel to entirely opaque.
   * ERODE - reduces the light areas with the amount defined by the level parameter.
   * DILATE - increases the light areas with the amount defined by the level parameter
   *
   * @param {MODE} MODE Either THRESHOLD, GRAY, INVERT, POSTERIZE, BLUR, OPAQUE, ERODE, or DILATE
   * @param {int|float} param in the range from 0 to 1
   */
  filter: function(mode, param) {
    if (arguments.length === 2) {
      p.filter(mode, param, this);
    } else if (arguments.length === 1) {
      // no param specified, send null to show its invalid
      p.filter(mode, null, this);
    }
    // NOTE(review): see blend() — presumably invalidates the cached source.
    delete this.sourceImg;
  },
  /**
   * @member PImage
   * Saves the image into a file. Images are saved in TIFF, TARGA, JPEG, and PNG format depending on
   * the extension within the filename parameter. For example, "image.tif" will have a TIFF image and
   * "image.png" will save a PNG image. If no extension is included in the filename, the image will save
   * in TIFF format and .tif will be added to the name. These files are saved to the sketch's folder,
   * which may be opened by selecting "Show sketch folder" from the "Sketch" menu. It is not possible to
   * use save() while running the program in a web browser.
   * To save an image created within the code, rather than through loading, it's necessary to make the
   * image with the createImage() function so it is aware of the location of the program and can therefore
   * save the file to the right place. See the createImage() reference for more information.
   *
   * @param {String} filename a sequence of letters and numbers
   */
  save: function(file){
    p.save(file,this);
  },
  /**
   * @member PImage
   * Resize the image to a new width and height. To make the image scale proportionally, use 0 as the
   * value for the wide or high parameter.
   *
   * @param {int} wide the resized image width
   * @param {int} high the resized image height
   *
   * @see get
   */
  resize: function(w, h) {
    if (this.isRemote) { // Remote images cannot access imageData
      throw "Image is loaded remotely. Cannot resize.";
    }
    if (this.width !== 0 || this.height !== 0) {
      // make aspect ratio if w or h is 0
      if (w === 0 && h !== 0) {
        w = Math.floor(this.width / this.height * h);
      } else if (h === 0 && w !== 0) {
        h = Math.floor(this.height / this.width * w);
      }
      // put 'this.imageData' into a new canvas
      var canvas = getCanvasData(this.imageData).canvas;
      // pull imageData object out of canvas into ImageData object
      var imageData = getCanvasData(canvas, w, h).context.getImageData(0, 0, w, h);
      // set this as new pimage
      this.fromImageData(imageData);
    }
  },
  /**
   * @member PImage
   * Masks part of an image from displaying by loading another image and using it as an alpha channel.
   * This mask image should only contain grayscale data, but only the blue color channel is used. The
   * mask image needs to be the same size as the image to which it is applied.
   * In addition to using a mask image, an integer array containing the alpha channel data can be
   * specified directly. This method is useful for creating dynamically generated alpha masks. This
   * array must be of the same length as the target image's pixels array and should contain only grayscale
   * data of values between 0-255.
   *
   * @param {PImage} maskImg any PImage object used as the alpha channel for "img", needs to be same
   * size as "img"
   * @param {int[]} maskArray any array of Integer numbers used as the alpha channel, needs to be same
   * length as the image's pixel array
   */
  mask: function(mask) {
    var obj = this.toImageData(),
        i,
        size;
    if (mask instanceof PImage || mask.__isPImage) {
      if (mask.width === this.width && mask.height === this.height) {
        mask = mask.toImageData();
        // i starts at the blue byte of each RGBA quad; i+1 is the alpha byte.
        for (i = 2, size = this.width * this.height * 4; i < size; i += 4) {
          // using it as an alpha channel
          obj.data[i + 1] = mask.data[i];
          // but only the blue color channel
        }
      } else {
        throw "mask must have the same dimensions as PImage.";
      }
    } else if (mask instanceof Array) {
      if (this.width * this.height === mask.length) {
        // Array form: each entry is written directly to the alpha byte.
        for (i = 0, size = mask.length; i < size; ++i) {
          obj.data[i * 4 + 3] = mask[i];
        }
      } else {
        throw "mask array must be the same length as PImage pixels array.";
      }
    }
    this.fromImageData(obj);
  },
  // These are intentionally left blank for PImages, we work live with pixels and draw as necessary
  /**
   * @member PImage
   * Loads the pixel data for the image into its pixels[] array. This function must always be called
   * before reading from or writing to pixels[].
   * Certain renderers may or may not seem to require loadPixels() or updatePixels(). However, the
   * rule is that any time you want to manipulate the pixels[] array, you must first call loadPixels(),
   * and after changes have been made, call updatePixels(). Even if the renderer may not seem to use
   * this function in the current Processing release, this will always be subject to change.
   */
  loadPixels: nop,
  /**
   * @member PImage
   * Returns this image's pixels as an ImageData object. Remote images
   * return their source element instead, since their pixel data cannot be
   * read. A fresh copy is produced only when the cached imageData carries
   * pending writes (__isDirty); otherwise the cached object is returned.
   */
  toImageData: function() {
    if (this.isRemote) {
      return this.sourceImg;
    }
    if (!this.__isDirty) {
      return this.imageData;
    }
    var canvasData = getCanvasData(this.imageData);
    return canvasData.context.getImageData(0, 0, this.width, this.height);
  },
  /**
   * @member PImage
   * Encodes the image as a data URI (PNG by default, per canvas.toDataURL).
   * Throws for remote images, whose pixel data cannot be read.
   */
  toDataURL: function() {
    if (this.isRemote) { // Remote images cannot access imageData
      throw "Image is loaded remotely. Cannot create dataURI.";
    }
    var canvasData = getCanvasData(this.imageData);
    return canvasData.canvas.toDataURL();
  },
  /**
   * @member PImage
   * Initializes this PImage from an ImageData object, creating a backing
   * canvas so the image can be drawn with drawImage(). The format is
   * always ARGB afterwards.
   */
  fromImageData: function(canvasImg) {
    var w = canvasImg.width,
        h = canvasImg.height,
        canvas = document.createElement('canvas'),
        ctx = canvas.getContext('2d');
    this.width = canvas.width = w;
    this.height = canvas.height = h;
    ctx.putImageData(canvasImg, 0, 0);
    // changed for 0.9
    this.format = PConstants.ARGB;
    this.imageData = canvasImg;
    this.sourceImg = canvas;
  }
};
p.PImage = PImage;
/**
* Creates a new PImage (the datatype for storing images). This provides a fresh buffer of pixels to play
* with. Set the size of the buffer with the width and height parameters. The format parameter defines how
* the pixels are stored. See the PImage reference for more information.
* Be sure to include all three parameters, specifying only the width and height (but no format) will
* produce a strange error.
* Advanced users please note that createImage() should be used instead of the syntax new PImage().
*
* @param {int} width image width
* @param {int} height image height
* @param {MODE} format Either RGB, ARGB, ALPHA (grayscale alpha channel)
*
* @returns {PImage}
*
* @see PImage
* @see PGraphics
*/
/**
 * Factory for a fresh PImage pixel buffer; sketches should call this
 * rather than using "new PImage()" directly.
 */
p.createImage = function(w, h, mode) {
  var img = new PImage(w, h, mode);
  return img;
};
// Loads an image for display. Type is an extension. Callback is fired on load.
/**
* Loads an image into a variable of type PImage. Four types of images ( .gif, .jpg, .tga, .png) images may
* be loaded. To load correctly, images must be located in the data directory of the current sketch. In most
* cases, load all images in setup() to preload them at the start of the program. Loading images inside draw()
* will reduce the speed of a program.
* The filename parameter can also be a URL to a file found online. For security reasons, a Processing sketch
* found online can only download files from the same server from which it came. Getting around this restriction
* requires a signed applet.
* The extension parameter is used to determine the image type in cases where the image filename does not end
* with a proper extension. Specify the extension as the second parameter to loadImage(), as shown in the
* third example on this page.
* If an image is not loaded successfully, the null value is returned and an error message will be printed to
* the console. The error message does not halt the program, however the null value may cause a NullPointerException
* if your code does not check whether the value returned from loadImage() is null.
* Depending on the type of error, a PImage object may still be returned, but the width and height of the image
* will be set to -1. This happens if bad image data is returned or cannot be decoded properly. Sometimes this happens
* with image URLs that produce a 403 error or that redirect to a password prompt, because loadImage() will attempt
* to interpret the HTML as image data.
*
* @param {String} filename name of file to load, can be .gif, .jpg, .tga, or a handful of other image
* types depending on your platform.
* @param {String} extension the type of image to load, for example "png", "gif", "jpg"
*
* @returns {PImage}
*
* @see PImage
* @see image
* @see imageMode
* @see background
*/
p.loadImage = function(file, type, callback) {
  // When an explicit extension is given, append it to the filename.
  if (type) {
    file = file + "." + type;
  }
  var pimg;
  // Serve synchronously from the preloader cache when possible.
  if (curSketch.imageCache.images[file]) {
    pimg = new PImage(curSketch.imageCache.images[file]);
    pimg.loaded = true;
    return pimg;
  }
  // Otherwise load asynchronously through a detached <img> element; the
  // empty PImage is returned immediately and filled in once loaded.
  pimg = new PImage();
  var img = document.createElement('img');
  pimg.sourceImg = img;
  img.onload = function() {
    // change the object into a PImage now that its loaded
    pimg.fromHTMLImageData(img);
    pimg.loaded = true;
    if (callback) {
      callback();
    }
  };
  // needs to be called after the img.onload function is declared or it wont work in opera
  img.src = file;
  return pimg;
};
// async loading of large images, same functionality as loadImage above
/**
* This function load images on a separate thread so that your sketch does not freeze while images load during
* setup(). While the image is loading, its width and height will be 0. If an error occurs while loading the image,
* its width and height will be set to -1. You'll know when the image has loaded properly because its width and
* height will be greater than 0. Asynchronous image loading (particularly when downloading from a server) can
* dramatically improve performance.
* The extension parameter is used to determine the image type in cases where the image filename does not end
* with a proper extension. Specify the extension as the second parameter to requestImage().
*
* @param {String} filename name of file to load, can be .gif, .jpg, .tga, or a handful of other image
* types depending on your platform.
* @param {String} extension the type of image to load, for example "png", "gif", "jpg"
*
* @returns {PImage}
*
* @see PImage
* @see loadImage
*/
p.requestImage = p.loadImage;
/**
 * get(x, y): returns the packed ARGB color at canvas coordinate x,y.
 * Out-of-bounds coordinates yield transparent black (0).
 */
function get$2(x,y) {
  var data;
  if (x < 0 || y < 0 || x >= p.width || y >= p.height) {
    // x,y is outside the canvas: transparent black.
    return 0;
  }
  if (isContextReplaced) {
    // loadPixels() has been called; read from the cached pixel buffer.
    var offset = ((0|x) + p.width * (0|y)) * 4;
    data = p.imageData.data;
    return (data[offset + 3] << 24) & PConstants.ALPHA_MASK |
           (data[offset] << 16) & PConstants.RED_MASK |
           (data[offset + 1] << 8) & PConstants.GREEN_MASK |
           data[offset + 2] & PConstants.BLUE_MASK;
  }
  // Fetch a 1x1 ImageData block directly from the live context.
  data = p.toImageData(0|x, 0|y, 1, 1).data;
  return (data[3] << 24) & PConstants.ALPHA_MASK |
         (data[0] << 16) & PConstants.RED_MASK |
         (data[1] << 8) & PConstants.GREEN_MASK |
         data[2] & PConstants.BLUE_MASK;
}
/**
 * PImage.get(x, y): returns the packed ARGB color at x,y of img's
 * pixel buffer. Throws for remote images.
 */
function get$3(x,y,img) {
  if (img.isRemote) { // Remote images cannot access imageData
    throw "Image is loaded remotely. Cannot get x,y.";
  }
  var data = img.imageData.data,
      offset = (y * img.width + x) * 4;
  return (data[offset + 3] << 24) & PConstants.ALPHA_MASK |
         (data[offset] << 16) & PConstants.RED_MASK |
         (data[offset + 1] << 8) & PConstants.GREEN_MASK |
         data[offset + 2] & PConstants.BLUE_MASK;
}
/**
 * get(x, y, w, h): returns a w-by-h PImage copied from the canvas
 * starting at coordinate x,y.
 */
function get$4(x, y, w, h) {
  var region = new PImage(w, h, PConstants.ARGB);
  region.fromImageData(p.toImageData(x, y, w, h));
  return region;
}
/**
 * PImage.get(x, y, w, h): returns a w-by-h PImage copied from img at
 * x,y. Pixels requested outside the source image are left untouched
 * (transparent). Throws for remote images.
 */
function get$5(x, y, w, h, img) {
  if (img.isRemote) { // Remote images cannot access imageData
    throw "Image is loaded remotely. Cannot get x,y,w,h.";
  }
  var result = new PImage(w, h, PConstants.ARGB),
      resultData = result.imageData.data,
      srcWidth = img.width,
      srcHeight = img.height,
      srcData = img.imageData.data;
  // Clamp the copy window so we never read outside the source image.
  var firstRow = Math.max(0, -y),
      firstCol = Math.max(0, -x),
      lastRow = Math.min(h, srcHeight - y),
      lastCol = Math.min(w, srcWidth - x);
  for (var row = firstRow; row < lastRow; ++row) {
    // Byte offsets advance in lockstep, four bytes (RGBA) per pixel.
    var src = ((y + row) * srcWidth + (x + firstCol)) * 4;
    var dst = (row * w + firstCol) * 4;
    for (var col = firstCol; col < lastCol; ++col) {
      resultData[dst++] = srcData[src++];
      resultData[dst++] = srcData[src++];
      resultData[dst++] = srcData[src++];
      resultData[dst++] = srcData[src++];
    }
  }
  result.__isDirty = true;
  return result;
}
// Gets a single pixel or block of pixels from the current Canvas Context or a PImage
/**
* Reads the color of any pixel or grabs a section of an image. If no parameters are specified, the entire
* image is returned. Get the value of one pixel by specifying an x,y coordinate. Get a section of the display
* window by specifying an additional width and height parameter. If the pixel requested is outside of the image
* window, black is returned. The numbers returned are scaled according to the current color ranges, but only RGB
* values are returned by this function. For example, even though you may have drawn a shape with colorMode(HSB),
* the numbers returned will be in RGB.
* Getting the color of a single pixel with get(x, y) is easy, but not as fast as grabbing the data directly
* from pixels[]. The equivalent statement to "get(x, y)" using pixels[] is "pixels[y*width+x]". Processing
* requires calling loadPixels() to load the display window data into the pixels[] array before getting the values.
* This function ignores imageMode().
*
* @param {int} x x-coordinate of the pixel
* @param {int} y y-coordinate of the pixel
* @param {int} width width of pixel rectangle to get
* @param {int} height height of pixel rectangle to get
*
* @returns {Color|PImage}
*
* @see set
* @see pixels[]
* @see imageMode
*/
/**
 * Dispatches the get() overloads by argument count:
 *  get()              -> whole canvas as a PImage
 *  get(img)           -> whole img as a PImage
 *  get(x, y)          -> color at x,y of the canvas
 *  get(x, y, img)     -> color at x,y of img
 *  get(x, y, w, h)    -> canvas region as a PImage
 *  get(x, y, w, h, img) -> img region as a PImage
 */
p.get = function(x, y, w, h, img) {
  // for 0, 2 and 4 arguments use curContext, otherwise PImage.get was called.
  // Use the file-wide "undef" sentinel rather than the global "undefined",
  // consistent with the rest of this file.
  if (img !== undef) {
    return get$5(x, y, w, h, img);
  }
  if (h !== undef) {
    return get$4(x, y, w, h);
  }
  if (w !== undef) {
    return get$3(x, y, w);
  }
  if (y !== undef) {
    return get$2(x, y);
  }
  if (x !== undef) {
    // PImage.get() was called, return a new PImage
    return get$5(0, 0, x.width, x.height, x);
  }
  return get$4(0, 0, p.width, p.height);
};
/**
* Creates and returns a new PGraphics object of the types P2D, P3D, and JAVA2D. Use this class if you need to draw
* into an off-screen graphics buffer. It's not possible to use createGraphics() with OPENGL, because it doesn't
* allow offscreen use. The DXF and PDF renderers require the filename parameter.
It's important to call
* any drawing commands between beginDraw() and endDraw() statements. This is also true for any commands that affect
* drawing, such as smooth() or colorMode().
Unlike the main drawing surface which is completely opaque,
* surfaces created with createGraphics() can have transparency. This makes it possible to draw into a graphics and
* maintain the alpha channel.
*
* @param {int} width width in pixels
* @param {int} height height in pixels
* @param {int} renderer Either P2D, P3D, JAVA2D, PDF, DXF
* @param {String} filename the name of the file (not supported yet)
*/
/**
 * Creates an off-screen PGraphics buffer by spinning up a nested
 * Processing instance sized w x h with the given renderer.
 */
p.createGraphics = function(w, h, render) {
  var graphics = new Processing();
  graphics.size(w, h, render);
  return graphics;
};
// pixels caching
// Restores the real 2D context after pixel-cache writes and flushes any
// pending setPixel() changes back to the canvas.
function resetContext() {
  if (!isContextReplaced) {
    return;
  }
  curContext = originalContext;
  isContextReplaced = false;
  p.updatePixels();
}
/**
 * Proxy constructor mirroring every member of curContext. Each mirrored
 * function and property first restores the real context (flushing cached
 * pixel writes) before delegating, so drawing calls made while pixels[]
 * is cached still behave correctly.
 */
function SetPixelContextWrapper() {
  function delegateMethod(target, name) {
    target[name] = function() {
      resetContext();
      curContext[name].apply(curContext, arguments);
    };
  }
  function delegateProperty(target, name) {
    p.defineProperty(target, name, {
      get: function() {
        resetContext();
        return curContext[name];
      },
      set: function(value) {
        resetContext();
        curContext[name] = value;
      }
    });
  }
  for (var member in curContext) {
    if (typeof curContext[member] === 'function') {
      delegateMethod(this, member);
    } else {
      delegateProperty(this, member);
    }
  }
}
// Swaps curContext for the pixel-caching proxy so setPixel() writes hit
// p.imageData instead of the canvas. The proxy is created lazily on the
// first swap; the write counter is reset each time.
function replaceContext() {
  if(isContextReplaced) {
    return;
  }
  // Snapshot the display window so cached reads/writes see current pixels.
  p.loadPixels();
  if(proxyContext === null) {
    originalContext = curContext;
    proxyContext = new SetPixelContextWrapper();
  }
  isContextReplaced = true;
  curContext = proxyContext;
  setPixelsCached = 0;
}
// set(x, y, c): writes color c at canvas coordinate x,y through the
// cached pixels[] buffer, flushing to the canvas once the number of
// cached writes exceeds maxPixelsCached.
function set$3(x, y, c) {
  if (x < 0 || y < 0 || x >= p.width || y >= p.height) {
    return; // out-of-bounds writes are silently ignored
  }
  replaceContext();
  p.pixels.setPixel((0|x) + p.width * (0|y), c);
  if (++setPixelsCached > maxPixelsCached) {
    resetContext();
  }
}
// PImage.set(x, y, c): writes the color obj into img's pixel buffer at
// x,y. Throws for remote images.
function set$4(x, y, obj, img) {
  if (img.isRemote) { // Remote images cannot access imageData
    throw "Image is loaded remotely. Cannot set x,y.";
  }
  var rgba = p.color.toArray(obj),
      data = img.imageData.data,
      offset = (y * img.width + x) * 4;
  data[offset] = rgba[0];
  data[offset + 1] = rgba[1];
  data[offset + 2] = rgba[2];
  data[offset + 3] = rgba[3];
}
// Paints a pixel array into the canvas
/**
* Changes the color of any pixel or writes an image directly into the display window. The x and y parameters
* specify the pixel to change and the color parameter specifies the color value. The color parameter is affected
* by the current color mode (the default is RGB values from 0 to 255). When setting an image, the x and y
* parameters define the coordinates for the upper-left corner of the image.
* Setting the color of a single pixel with set(x, y) is easy, but not as fast as putting the data directly
* into pixels[]. The equivalent statement to "set(x, y, #000000)" using pixels[] is "pixels[y*width+x] = #000000".
* You must call loadPixels() to load the display window data into the pixels[] array before setting the values
* and calling updatePixels() to update the window with any changes. This function ignores imageMode().
*
* @param {int} x x-coordinate of the pixel
* @param {int} y y-coordinate of the pixel
* @param {Color} obj any value of the color datatype
* @param {PImage} img any valid variable of type PImage
*
* @see get
* @see pixels[]
* @see imageMode
*/
/**
 * Dispatches the set() overloads: with three arguments, obj is either a
 * packed color (single-pixel write) or an image to draw at x,y; with
 * four, the write targets the supplied PImage. (Removed the unused
 * locals "color" and "oldFill" from the original.)
 */
p.set = function(x, y, obj, img) {
  if (arguments.length === 3) {
    // called p.set(), was it with a color or a img ?
    if (typeof obj === "number") {
      set$3(x, y, obj);
    } else if (obj instanceof PImage || obj.__isPImage) {
      p.image(obj, x, y);
    }
  } else if (arguments.length === 4) {
    // PImage.set(x,y,c) was called, set coordinate x,y color to c of img
    set$4(x, y, obj, img);
  }
};
// Cached display-window ImageData; populated by p.loadPixels().
p.imageData = {};
// handle the sketch code for pixels[]
// parser code converts pixels[] to getPixels() or setPixels(),
// .length becomes getLength()
/**
* Array containing the values for all the pixels in the display window. These values are of the color datatype.
* This array is the size of the display window. For example, if the image is 100x100 pixels, there will be 10000
* values and if the window is 200x300 pixels, there will be 60000 values. The index value defines the position
* of a value within the array. For example, the statment color b = pixels[230] will set the variable b to be
* equal to the value at that location in the array.
* Before accessing this array, the data must loaded with the loadPixels() function. After the array data has
* been modified, the updatePixels() function must be run to update the changes.
*
* @param {int} index must not exceed the size of the array
*
* @see loadPixels
* @see updatePixels
* @see get
* @see set
* @see PImage
*/
p.pixels = {
  // Number of pixels (RGBA quads) in the cached display buffer.
  getLength: function() {
    var data = p.imageData.data;
    return data.length ? data.length / 4 : 0;
  },
  // Packed ARGB color at linear pixel index i.
  getPixel: function(i) {
    var data = p.imageData.data, offset = i * 4;
    return (data[offset + 3] << 24) & 0xff000000 |
           (data[offset] << 16) & 0x00ff0000 |
           (data[offset + 1] << 8) & 0x0000ff00 |
           data[offset + 2] & 0x000000ff;
  },
  // Stores packed ARGB color c at linear pixel index i.
  setPixel: function(i,c) {
    var data = p.imageData.data, offset = i * 4;
    data[offset] = (c & 0x00ff0000) >>> 16; // RED_MASK
    data[offset + 1] = (c & 0x0000ff00) >>> 8; // GREEN_MASK
    data[offset + 2] = (c & 0x000000ff); // BLUE_MASK
    data[offset + 3] = (c & 0xff000000) >>> 24; // ALPHA_MASK
  },
  // Returns the whole buffer as an array of packed ARGB ints.
  toArray: function() {
    var data = p.imageData.data,
        total = p.imageData.width * p.imageData.height,
        arr = [];
    for (var i = 0, offset = 0; i < total; i++, offset += 4) {
      arr.push((data[offset + 3] << 24) & 0xff000000 |
               (data[offset] << 16) & 0x00ff0000 |
               (data[offset + 1] << 8) & 0x0000ff00 |
               data[offset + 2] & 0x000000ff);
    }
    return arr;
  },
  // Bulk-writes an array of packed ARGB ints into the buffer.
  set: function(arr) {
    for (var i = 0, len = arr.length; i < len; i++) {
      this.setPixel(i, arr[i]);
    }
  }
};
// Gets a 1-Dimensional pixel array from Canvas
/**
* Loads the pixel data for the display window into the pixels[] array. This function must always be called
* before reading from or writing to pixels[].
* Certain renderers may or may not seem to require loadPixels() or updatePixels(). However, the rule is that
* any time you want to manipulate the pixels[] array, you must first call loadPixels(), and after changes
* have been made, call updatePixels(). Even if the renderer may not seem to use this function in the current
* Processing release, this will always be subject to change.
*
* @see pixels[]
* @see updatePixels
*/
p.loadPixels = function() {
  // Snapshot the whole canvas into p.imageData so pixels[] can read/write it.
  var ctx = drawing.$ensureContext();
  p.imageData = ctx.getImageData(0, 0, p.width, p.height);
};
// Draws a 1-Dimensional pixel array to Canvas
/**
* Updates the display window with the data in the pixels[] array. Use in conjunction with loadPixels(). If
* you're only reading pixels from the array, there's no need to call updatePixels() unless there are changes.
* Certain renderers may or may not seem to require loadPixels() or updatePixels(). However, the rule is that
* any time you want to manipulate the pixels[] array, you must first call loadPixels(), and after changes
* have been made, call updatePixels(). Even if the renderer may not seem to use this function in the current
* Processing release, this will always be subject to change.
* Currently, none of the renderers use the additional parameters to updatePixels(), however this may be
* implemented in the future.
*
* @see loadPixels
* @see pixels[]
*/
p.updatePixels = function() {
  // Nothing to flush if loadPixels() has never populated imageData.
  if (!p.imageData) {
    return;
  }
  drawing.$ensureContext().putImageData(p.imageData, 0, 0);
};
/**
* Set various hints and hacks for the renderer. This is used to handle obscure rendering features that cannot be
* implemented in a consistent manner across renderers. Many options will often graduate to standard features
* instead of hints over time.
* hint(ENABLE_OPENGL_4X_SMOOTH) - Enable 4x anti-aliasing for OpenGL. This can help force anti-aliasing if
* it has not been enabled by the user. On some graphics cards, this can also be set by the graphics driver's
* control panel, however not all cards make this available. This hint must be called immediately after the
* size() command because it resets the renderer, obliterating any settings and anything drawn (and like size(),
* re-running the code that came before it again).
* hint(DISABLE_OPENGL_2X_SMOOTH) - In Processing 1.0, Processing always enables 2x smoothing when the OpenGL
* renderer is used. This hint disables the default 2x smoothing and returns the smoothing behavior found in
* earlier releases, where smooth() and noSmooth() could be used to enable and disable smoothing, though the
* quality was inferior.
* hint(ENABLE_NATIVE_FONTS) - Use the native version fonts when they are installed, rather than the bitmapped
* version from a .vlw file. This is useful with the JAVA2D renderer setting, as it will improve font rendering
* speed. This is not enabled by default, because it can be misleading while testing because the type will look
* great on your machine (because you have the font installed) but lousy on others' machines if the identical
* font is unavailable. This option can only be set per-sketch, and must be called before any use of textFont().
* hint(DISABLE_DEPTH_TEST) - Disable the zbuffer, allowing you to draw on top of everything at will. When depth
* testing is disabled, items will be drawn to the screen sequentially, like a painting. This hint is most often
* used to draw in 3D, then draw in 2D on top of it (for instance, to draw GUI controls in 2D on top of a 3D
* interface). Starting in release 0149, this will also clear the depth buffer. Restore the default with
* hint(ENABLE_DEPTH_TEST), but note that with the depth buffer cleared, any 3D drawing that happens later in
* draw() will ignore existing shapes on the screen.
* hint(ENABLE_DEPTH_SORT) - Enable primitive z-sorting of triangles and lines in P3D and OPENGL. This can slow
* performance considerably, and the algorithm is not yet perfect. Restore the default with hint(DISABLE_DEPTH_SORT).
* hint(DISABLE_OPENGL_ERROR_REPORT) - Speeds up the OPENGL renderer setting by not checking for errors while
* running. Undo with hint(ENABLE_OPENGL_ERROR_REPORT).
* As of release 0149, unhint() has been removed in favor of adding additional ENABLE/DISABLE constants to reset
* the default behavior. This prevents the double negatives, and also reinforces which hints can be enabled or disabled.
*
* @param {MODE} item constant: name of the hint to be enabled or disabled
*
* @see PGraphics
* @see createGraphics
* @see size
*/
p.hint = function(which) {
  var ctx = drawing.$ensureContext();
  switch (which) {
  case PConstants.DISABLE_DEPTH_TEST:
    // Paint sequentially on top of everything; also wipes the depth buffer.
    ctx.disable(ctx.DEPTH_TEST);
    ctx.depthMask(false);
    ctx.clear(ctx.DEPTH_BUFFER_BIT);
    break;
  case PConstants.ENABLE_DEPTH_TEST:
    // Restore normal z-buffered drawing.
    ctx.enable(ctx.DEPTH_TEST);
    ctx.depthMask(true);
    break;
  }
};
/**
* The background() function sets the color used for the background of the Processing window.
* The default background is light gray. In the draw() function, the background color is used to clear the display window at the beginning of each frame.
* An image can also be used as the background for a sketch, however its width and height must be the same size as the sketch window.
* To resize an image 'b' to the size of the sketch window, use b.resize(width, height).
* Images used as background will ignore the current tint() setting.
* For the main drawing surface, the alpha value will be ignored. However,
* alpha can be used on PGraphics objects from createGraphics(). This is
* the only way to set all the pixels partially transparent, for instance.
* If the 'gray' parameter is passed in the function sets the background to a grayscale value, based on the
* current colorMode.
*
* Note that background() should be called before any transformations occur,
* because some implementations may require the current transformation matrix
* to be identity before drawing.
*
* @param {int|float} gray specifies a value between white and black
* @param {int|float} value1 red or hue value (depending on the current color mode)
* @param {int|float} value2 green or saturation value (depending on the current color mode)
* @param {int|float} value3 blue or brightness value (depending on the current color mode)
* @param {int|float} alpha opacity of the background
* @param {Color} color any value of the color datatype
* @param {int} hex color value in hexadecimal notation (i.e. #FFCC00 or 0xFFFFCC00)
* @param {PImage} image an instance of a PImage to use as a background
*
* @see #stroke()
* @see #fill()
* @see #tint()
* @see #colorMode()
*/
// Validates the background() arguments and caches the resulting color or
// image in backgroundObj for the renderer-specific background() methods.
var backgroundHelper = function(arg1, arg2, arg3, arg4) {
  var isImage = arg1 instanceof PImage || arg1.__isPImage;
  if (!isImage) {
    // Not an image: interpret the arguments as a color specification.
    backgroundObj = p.color(arg1, arg2, arg3, arg4);
    return;
  }
  if (!arg1.loaded) {
    throw "Error using image in background(): PImage not loaded.";
  }
  if (arg1.width !== p.width || arg1.height !== p.height) {
    throw "Background image must be the same dimensions as the canvas.";
  }
  backgroundObj = arg1;
};
Drawing2D.prototype.background = function(arg1, arg2, arg3, arg4) {
  if (arg1 !== undef) {
    backgroundHelper(arg1, arg2, arg3, arg4);
  }
  var bg = backgroundObj;
  saveContext();
  // Paint in untransformed screen coordinates regardless of the current matrix.
  curContext.setTransform(1, 0, 0, 1, 0, 0);
  if (bg instanceof PImage || bg.__isPImage) {
    p.image(bg, 0, 0);
  } else {
    // A translucent background must not accumulate over the previous frame.
    if (p.alpha(bg) !== colorModeA) {
      curContext.clearRect(0, 0, p.width, p.height);
    }
    curContext.fillStyle = p.color.toString(bg);
    curContext.fillRect(0, 0, p.width, p.height);
    isFillDirty = true;
  }
  restoreContext();
};
Drawing3D.prototype.background = function(arg1, arg2, arg3, arg4) {
  if (arguments.length !== 0) {
    backgroundHelper(arg1, arg2, arg3, arg4);
  }
  // Clear color and depth together. Image backgrounds are not implemented
  // for the 3D renderer yet.
  var glColor = p.color.toGLArray(backgroundObj);
  curContext.clearColor(glColor[0], glColor[1], glColor[2], glColor[3]);
  curContext.clear(curContext.COLOR_BUFFER_BIT | curContext.DEPTH_BUFFER_BIT);
};
// Draws an image to the Canvas
/**
* Displays images to the screen. The images must be in the sketch's "data" directory to load correctly. Select "Add
* file..." from the "Sketch" menu to add the image. Processing currently works with GIF, JPEG, and Targa images. The
* color of an image may be modified with the tint() function and if a GIF has transparency, it will maintain its
* transparency. The img parameter specifies the image to display and the x and y parameters define the location of
* the image from its upper-left corner. The image is displayed at its original size unless the width and height
* parameters specify a different size. The imageMode() function changes the way the parameters work. A call to
* imageMode(CORNERS) will change the width and height parameters to define the x and y values of the opposite
* corner of the image.
*
* @param {PImage} img the image to display
* @param {int|float} x x-coordinate of the image
* @param {int|float} y y-coordinate of the image
* @param {int|float} width width to display the image
* @param {int|float} height height to display the image
*
* @see loadImage
* @see PImage
* @see imageMode
* @see tint
* @see background
* @see alpha
*/
/**
 * Draws img at (x, y), optionally scaled to w × h, honoring imageMode()
 * and the current tint(). Images whose pixels are not yet available
 * (img.width <= 0) are silently skipped.
 * Fix: removed the unused locals `wid`/`hgt` that were computed and never read.
 */
Drawing2D.prototype.image = function(img, x, y, w, h) {
  // Fix fractional positions
  x = Math.round(x);
  y = Math.round(y);
  if (img.width > 0) {
    var bounds = imageModeConvert(x || 0, y || 0, w || img.width, h || img.height, arguments.length < 4);
    // The backing <img>/<canvas> element can be blitted directly only when
    // no tint has to be applied to its pixels.
    var fastImage = !!img.sourceImg && curTint === null;
    if (fastImage) {
      var htmlElement = img.sourceImg;
      if (img.__isDirty) {
        img.updatePixels();
      }
      // Using HTML element's width and height in case if the image was resized.
      curContext.drawImage(htmlElement, 0, 0,
        htmlElement.width, htmlElement.height, bounds.x, bounds.y, bounds.w, bounds.h);
    } else {
      var obj = img.toImageData();
      // Tint the image
      if (curTint !== null) {
        curTint(obj);
        img.__isDirty = true;
      }
      curContext.drawImage(getCanvasData(obj).canvas, 0, 0,
        img.width, img.height, bounds.x, bounds.y, bounds.w, bounds.h);
    }
  }
};
Drawing3D.prototype.image = function(img, x, y, w, h) {
  if (img.width <= 0) {
    // Pixels not available yet; nothing to draw.
    return;
  }
  // Snap to whole-pixel positions.
  x = Math.round(x);
  y = Math.round(y);
  w = w || img.width;
  h = h || img.height;
  // Render the image as a textured quad in the z = 0 plane.
  p.beginShape(p.QUADS);
  p.texture(img);
  p.vertex(x, y, 0, 0, 0);
  p.vertex(x, y + h, 0, 0, h);
  p.vertex(x + w, y + h, 0, w, h);
  p.vertex(x + w, y, 0, w, 0);
  p.endShape();
};
/**
* The tint() function sets the fill value for displaying images. Images can be tinted to
* specified colors or made transparent by setting the alpha.
*
To make an image transparent, but not change its color,
* use white as the tint color and specify an alpha value. For instance,
* tint(255, 128) will make an image 50% transparent (unless
* colorMode() has been used).
*
*
When using hexadecimal notation to specify a color, use "#" or
* "0x" before the values (e.g. #CCFFAA, 0xFFCCFFAA). The # syntax uses six
* digits to specify a color (the way colors are specified in HTML and CSS).
* When using the hexadecimal notation starting with "0x", the hexadecimal
* value must be specified with eight characters; the first two characters
* define the alpha component and the remainder the red, green, and blue
* components.
*
The value for the parameter "gray" must be less than or equal
* to the current maximum value as specified by colorMode().
* The default maximum value is 255.
*
The tint() method is also used to control the coloring of
* textures in 3D.
*
* @param {int|float} gray any valid number
* @param {int|float} alpha opacity of the image
* @param {int|float} value1 red or hue value
* @param {int|float} value2 green or saturation value
* @param {int|float} value3 blue or brightness value
* @param {int|float} color any value of the color datatype
* @param {int} hex color value in hexadecimal notation (i.e. #FFCC00 or 0xFFFFCC00)
*
* @see #noTint()
* @see #image()
*/
p.tint = function(a1, a2, a3, a4) {
  var tintColor = p.color(a1, a2, a3, a4);
  // Normalize each channel to a 0..1 multiplier in the current color mode.
  var rMul = p.red(tintColor) / colorModeX;
  var gMul = p.green(tintColor) / colorModeY;
  var bMul = p.blue(tintColor) / colorModeZ;
  var aMul = p.alpha(tintColor) / colorModeA;
  // 2D path: scale every RGBA byte of the ImageData in place.
  curTint = function(obj) {
    var data = obj.data,
        end = 4 * obj.width * obj.height;
    for (var i = 0; i < end; i += 4) {
      data[i] *= rMul;
      data[i + 1] *= gMul;
      data[i + 2] *= bMul;
      data[i + 3] *= aMul;
    }
  };
  // for overriding the color buffer when 3d rendering
  curTint3d = function(data) {
    var end = data.length;
    for (var i = 0; i < end; i += 4) {
      data[i] = rMul;
      data[i + 1] = gMul;
      data[i + 2] = bMul;
      data[i + 3] = aMul;
    }
  };
};
/**
* The noTint() function removes the current fill value for displaying images and reverts to displaying images with their original hues.
*
* @see #tint()
* @see #image()
*/
p.noTint = function() {
  // Dropping both tint callbacks restores untinted image drawing.
  curTint = curTint3d = null;
};
/**
* Copies a region of pixels from the display window to another area of the display window and copies a region of pixels from an
* image used as the srcImg parameter into the display window. If the source and destination regions aren't the same size, it will
* automatically resize the source pixels to fit the specified target region. No alpha information is used in the process, however
* if the source image has an alpha channel set, it will be copied as well. This function ignores imageMode().
*
* @param {int} x X coordinate of the source's upper left corner
* @param {int} y Y coordinate of the source's upper left corner
* @param {int} width source image width
* @param {int} height source image height
* @param {int} dx X coordinate of the destination's upper left corner
* @param {int} dy Y coordinate of the destination's upper left corner
* @param {int} dwidth destination image width
* @param {int} dheight destination image height
* @param {PImage} srcImg image variable referring to the source image
*
* @see blend
* @see get
*/
p.copy = function(src, sx, sy, sw, sh, dx, dy, dw, dh) {
  if (dh === undef) {
    // Eight-argument form: no source image was given, so the sketch itself
    // is the source and every argument shifts one slot to the right.
    p.blend(p, src, sx, sy, sw, sh, dx, dy, dw, PConstants.REPLACE);
    return;
  }
  // copy() is blend() with the REPLACE mode forced.
  p.blend(src, sx, sy, sw, sh, dx, dy, dw, dh, PConstants.REPLACE);
};
/**
* Blends a region of pixels from one image into another (or in itself again) with full alpha channel support. There
* is a choice of the following modes to blend the source pixels (A) with the ones of pixels in the destination image (B):
* BLEND - linear interpolation of colours: C = A*factor + B
* ADD - additive blending with white clip: C = min(A*factor + B, 255)
* SUBTRACT - subtractive blending with black clip: C = max(B - A*factor, 0)
* DARKEST - only the darkest colour succeeds: C = min(A*factor, B)
* LIGHTEST - only the lightest colour succeeds: C = max(A*factor, B)
* DIFFERENCE - subtract colors from underlying image.
* EXCLUSION - similar to DIFFERENCE, but less extreme.
* MULTIPLY - Multiply the colors, result will always be darker.
* SCREEN - Opposite multiply, uses inverse values of the colors.
* OVERLAY - A mix of MULTIPLY and SCREEN. Multiplies dark values, and screens light values.
* HARD_LIGHT - SCREEN when greater than 50% gray, MULTIPLY when lower.
* SOFT_LIGHT - Mix of DARKEST and LIGHTEST. Works like OVERLAY, but not as harsh.
* DODGE - Lightens light tones and increases contrast, ignores darks. Called "Color Dodge" in Illustrator and Photoshop.
* BURN - Darker areas are applied, increasing contrast, ignores lights. Called "Color Burn" in Illustrator and Photoshop.
* All modes use the alpha information (highest byte) of source image pixels as the blending factor. If the source and
* destination regions are different sizes, the image will be automatically resized to match the destination size. If the
* srcImg parameter is not used, the display window is used as the source image. This function ignores imageMode().
*
* @param {int} x X coordinate of the source's upper left corner
* @param {int} y Y coordinate of the source's upper left corner
* @param {int} width source image width
* @param {int} height source image height
* @param {int} dx X coordinate of the destination's upper left corner
* @param {int} dy Y coordinate of the destination's upper left corner
* @param {int} dwidth destination image width
* @param {int} dheight destination image height
* @param {PImage} srcImg image variable referring to the source image
* @param {PImage} MODE Either BLEND, ADD, SUBTRACT, LIGHTEST, DARKEST, DIFFERENCE, EXCLUSION, MULTIPLY, SCREEN,
* OVERLAY, HARD_LIGHT, SOFT_LIGHT, DODGE, BURN
* @see filter
*/
/**
 * Blends a rectangular region of src (or the sketch itself when the src
 * argument is omitted) onto the display window — or onto pimgdest's pixel
 * buffer when that internal argument is supplied by PImage.blend().
 * The parameter shuffle below is order-sensitive: do not reorder it.
 */
p.blend = function(src, sx, sy, sw, sh, dx, dy, dw, dh, mode, pimgdest) {
  if (src.isRemote) {
    throw "Image is loaded remotely. Cannot blend image.";
  }
  if (mode === undef) {
    // shift everything, and introduce p
    // (10-argument call: assignments run right-to-left through the
    // parameter list so each value moves one slot without clobbering)
    mode = dh;
    dh = dw;
    dw = dy;
    dy = dx;
    dx = sh;
    sh = sw;
    sw = sy;
    sy = sx;
    sx = src;
    src = p;
  }
  // Exclusive right/bottom edges of source and destination rectangles.
  var sx2 = sx + sw,
      sy2 = sy + sh,
      dx2 = dx + dw,
      dy2 = dy + dh,
      dest = pimgdest || p;
  // check if pimgdest is there and pixels, if so this was a call from pimg.blend
  if (pimgdest === undef || mode === undef) {
    p.loadPixels();
  }
  src.loadPixels();
  if (src === p && p.intersect(sx, sy, sx2, sy2, dx, dy, dx2, dy2)) {
    // Overlapping self-blend: copy the source region first so the blit
    // does not read pixels it has already overwritten.
    p.blit_resize(p.get(sx, sy, sx2 - sx, sy2 - sy), 0, 0, sx2 - sx - 1, sy2 - sy - 1,
      dest.imageData.data, dest.width, dest.height, dx, dy, dx2, dy2, mode);
  } else {
    p.blit_resize(src, sx, sy, sx2, sy2, dest.imageData.data, dest.width, dest.height, dx, dy, dx2, dy2, mode);
  }
  if (pimgdest === undef) {
    p.updatePixels();
  }
};
// helper function for filter()
// Builds the 1D blur kernel used by blurARGB() and caches it in p.shared so
// repeated blurs with the same radius reuse it. The kernel is symmetric
// around index `radius`, with the tap at distance i weighted (radius - i)^2.
// Fix: the previous code never assigned `radiusi`, so the left half of the
// kernel was never written (Float32Array[undefined] is not an element) and
// the right half got a constant weight instead of decreasing ones.
var buildBlurKernel = function(r) {
  var radius = p.floor(r * 3.5), i, radiusi;
  // Clamp to the range Processing supports (1..248).
  radius = (radius < 1) ? 1 : ((radius < 248) ? radius : 248);
  if (p.shared.blurRadius !== radius) {
    p.shared.blurRadius = radius;
    p.shared.blurKernelSize = 1 + (p.shared.blurRadius << 1);
    p.shared.blurKernel = new Float32Array(p.shared.blurKernelSize);
    var sharedBlurKernal = p.shared.blurKernel;
    var sharedBlurKernelSize = p.shared.blurKernelSize;
    // init blurKernel
    for (i = 0; i < sharedBlurKernelSize; i++) {
      sharedBlurKernal[i] = 0;
    }
    // Fill both halves symmetrically: entries at distance i from the
    // center get weight (radius - i)^2.
    radiusi = radius - 1;
    for (i = 1; i < radius; i++) {
      sharedBlurKernal[radius + i] = sharedBlurKernal[radiusi] = radiusi * radiusi;
      radiusi--;
    }
    // Center tap gets the largest weight.
    sharedBlurKernal[radius] = radius * radius;
  }
};
// Separable blur: a horizontal pass writes kernel-weighted channel sums into
// the r2/g2/b2/a2 scratch buffers, then a vertical pass folds those back
// into the image's pixel data. Kernel comes from buildBlurKernel(r).
var blurARGB = function(r, aImg) {
  var sum, cr, cg, cb, ca, c, m;
  var read, ri, ym, ymi, bk0;
  var wh = aImg.pixels.getLength();
  // Per-channel scratch buffers holding the horizontal-pass output.
  var r2 = new Float32Array(wh);
  var g2 = new Float32Array(wh);
  var b2 = new Float32Array(wh);
  var a2 = new Float32Array(wh);
  var yi = 0; // index of the first pixel of the current row
  var x, y, i, offset;
  buildBlurKernel(r);
  var aImgHeight = aImg.height;
  var aImgWidth = aImg.width;
  var sharedBlurKernelSize = p.shared.blurKernelSize;
  var sharedBlurRadius = p.shared.blurRadius;
  var sharedBlurKernal = p.shared.blurKernel;
  var pix = aImg.imageData.data;
  // Horizontal pass: blur each row into the scratch buffers.
  for (y = 0; y < aImgHeight; y++) {
    for (x = 0; x < aImgWidth; x++) {
      cb = cg = cr = ca = sum = 0;
      // bk0 is the first kernel tap that falls inside the row.
      read = x - sharedBlurRadius;
      if (read<0) {
        bk0 = -read;
        read = 0;
      } else {
        if (read >= aImgWidth) {
          break;
        }
        bk0=0;
      }
      for (i = bk0; i < sharedBlurKernelSize; i++) {
        if (read >= aImgWidth) {
          break;
        }
        offset = (read + yi) *4;
        m = sharedBlurKernal[i];
        ca += m * pix[offset + 3];
        cr += m * pix[offset];
        cg += m * pix[offset + 1];
        cb += m * pix[offset + 2];
        sum += m;
        read++;
      }
      ri = yi + x;
      // Normalize by the weight of the taps actually used at this edge.
      a2[ri] = ca / sum;
      r2[ri] = cr / sum;
      g2[ri] = cg / sum;
      b2[ri] = cb / sum;
    }
    yi += aImgWidth;
  }
  // Vertical pass: blur the scratch buffers back into the pixel data.
  // ym is the topmost source row under the kernel; ymi its buffer offset.
  yi = 0;
  ym = -sharedBlurRadius;
  ymi = ym*aImgWidth;
  for (y = 0; y < aImgHeight; y++) {
    for (x = 0; x < aImgWidth; x++) {
      cb = cg = cr = ca = sum = 0;
      if (ym<0) {
        bk0 = ri = -ym;
        read = x;
      } else {
        if (ym >= aImgHeight) {
          break;
        }
        bk0 = 0;
        ri = ym;
        read = x + ymi;
      }
      for (i = bk0; i < sharedBlurKernelSize; i++) {
        if (ri >= aImgHeight) {
          break;
        }
        m = sharedBlurKernal[i];
        ca += m * a2[read];
        cr += m * r2[read];
        cg += m * g2[read];
        cb += m * b2[read];
        sum += m;
        ri++;
        read += aImgWidth;
      }
      offset = (x + yi) *4;
      pix[offset] = cr / sum;
      pix[offset + 1] = cg / sum;
      pix[offset + 2] = cb / sum;
      pix[offset + 3] = ca / sum;
    }
    yi += aImgWidth;
    ymi += aImgWidth;
    ym++;
  }
};
// helper function for ERODE and DILATE modes of filter()
// Morphological helper shared by filter(ERODE) and filter(DILATE). Each
// pixel is replaced by the brightest (isInverted === false) or darkest
// (isInverted === true) of itself and its four clamped neighbours, using
// 77/151/28 integer luminance weights. Results are staged in `out` so the
// pass reads only original pixel values.
// Fix: the first (grow-light) branch had been truncated mid-statement —
// `while (currIdx= maxRowIdx) {` fused two distant lines and dropped the
// row-scan setup and neighbour clamping, leaving a syntax error. It is
// reconstructed here as the exact mirror of the intact second branch.
var dilate = function(isInverted, aImg) {
  var currIdx = 0;
  var maxIdx = aImg.pixels.getLength();
  var out = new Int32Array(maxIdx);
  var currRowIdx, maxRowIdx, colOrig, colOut, currLum;
  var idxRight, idxLeft, idxUp, idxDown,
      colRight, colLeft, colUp, colDown,
      lumRight, lumLeft, lumUp, lumDown;
  if (!isInverted) {
    // erosion (grow light areas)
    while (currIdx < maxIdx) {
      currRowIdx = currIdx;
      maxRowIdx = currIdx + aImg.width;
      while (currIdx < maxRowIdx) {
        colOrig = colOut = aImg.pixels.getPixel(currIdx);
        idxLeft = currIdx - 1;
        idxRight = currIdx + 1;
        idxUp = currIdx - aImg.width;
        idxDown = currIdx + aImg.width;
        // clamp neighbour indices at the row and image boundaries
        if (idxLeft < currRowIdx) {
          idxLeft = currIdx;
        }
        if (idxRight >= maxRowIdx) {
          idxRight = currIdx;
        }
        if (idxUp < 0) {
          idxUp = 0;
        }
        if (idxDown >= maxIdx) {
          idxDown = currIdx;
        }
        colUp = aImg.pixels.getPixel(idxUp);
        colLeft = aImg.pixels.getPixel(idxLeft);
        colDown = aImg.pixels.getPixel(idxDown);
        colRight = aImg.pixels.getPixel(idxRight);
        // compute luminance
        currLum = 77*(colOrig>>16&0xff) + 151*(colOrig>>8&0xff) + 28*(colOrig&0xff);
        lumLeft = 77*(colLeft>>16&0xff) + 151*(colLeft>>8&0xff) + 28*(colLeft&0xff);
        lumRight = 77*(colRight>>16&0xff) + 151*(colRight>>8&0xff) + 28*(colRight&0xff);
        lumUp = 77*(colUp>>16&0xff) + 151*(colUp>>8&0xff) + 28*(colUp&0xff);
        lumDown = 77*(colDown>>16&0xff) + 151*(colDown>>8&0xff) + 28*(colDown&0xff);
        // keep the brightest of the five samples
        if (lumLeft > currLum) {
          colOut = colLeft;
          currLum = lumLeft;
        }
        if (lumRight > currLum) {
          colOut = colRight;
          currLum = lumRight;
        }
        if (lumUp > currLum) {
          colOut = colUp;
          currLum = lumUp;
        }
        if (lumDown > currLum) {
          colOut = colDown;
          currLum = lumDown;
        }
        out[currIdx++] = colOut;
      }
    }
  } else {
    // dilate (grow dark areas)
    while (currIdx < maxIdx) {
      currRowIdx = currIdx;
      maxRowIdx = currIdx + aImg.width;
      while (currIdx < maxRowIdx) {
        colOrig = colOut = aImg.pixels.getPixel(currIdx);
        idxLeft = currIdx - 1;
        idxRight = currIdx + 1;
        idxUp = currIdx - aImg.width;
        idxDown = currIdx + aImg.width;
        // clamp neighbour indices at the row and image boundaries
        if (idxLeft < currRowIdx) {
          idxLeft = currIdx;
        }
        if (idxRight >= maxRowIdx) {
          idxRight = currIdx;
        }
        if (idxUp < 0) {
          idxUp = 0;
        }
        if (idxDown >= maxIdx) {
          idxDown = currIdx;
        }
        colUp = aImg.pixels.getPixel(idxUp);
        colLeft = aImg.pixels.getPixel(idxLeft);
        colDown = aImg.pixels.getPixel(idxDown);
        colRight = aImg.pixels.getPixel(idxRight);
        // compute luminance
        currLum = 77*(colOrig>>16&0xff) + 151*(colOrig>>8&0xff) + 28*(colOrig&0xff);
        lumLeft = 77*(colLeft>>16&0xff) + 151*(colLeft>>8&0xff) + 28*(colLeft&0xff);
        lumRight = 77*(colRight>>16&0xff) + 151*(colRight>>8&0xff) + 28*(colRight&0xff);
        lumUp = 77*(colUp>>16&0xff) + 151*(colUp>>8&0xff) + 28*(colUp&0xff);
        lumDown = 77*(colDown>>16&0xff) + 151*(colDown>>8&0xff) + 28*(colDown&0xff);
        // keep the darkest of the five samples
        if (lumLeft < currLum) {
          colOut = colLeft;
          currLum = lumLeft;
        }
        if (lumRight < currLum) {
          colOut = colRight;
          currLum = lumRight;
        }
        if (lumUp < currLum) {
          colOut = colUp;
          currLum = lumUp;
        }
        if (lumDown < currLum) {
          colOut = colDown;
          currLum = lumDown;
        }
        out[currIdx++]=colOut;
      }
    }
  }
  aImg.pixels.set(out);
  //p.arraycopy(out,0,pixels,0,maxIdx);
};
/**
* Filters the display window as defined by one of the following modes:
* THRESHOLD - converts the image to black and white pixels depending if they are above or below the threshold
* defined by the level parameter. The level must be between 0.0 (black) and 1.0(white). If no level is specified, 0.5 is used.
* GRAY - converts any colors in the image to grayscale equivalents
* INVERT - sets each pixel to its inverse value
* POSTERIZE - limits each channel of the image to the number of colors specified as the level parameter
* BLUR - executes a Gaussian blur with the level parameter specifying the extent of the blurring. If no level parameter is
* used, the blur is equivalent to Gaussian blur of radius 1.
* OPAQUE - sets the alpha channel to entirely opaque.
* ERODE - reduces the light areas with the amount defined by the level parameter.
* DILATE - increases the light areas with the amount defined by the level parameter.
*
* @param {MODE} MODE Either THRESHOLD, GRAY, INVERT, POSTERIZE, BLUR, OPAQUE, ERODE, or DILATE
* @param {int|float} level defines the quality of the filter
*
* @see blend
*/
/**
 * Applies an image filter (BLUR, GRAY, INVERT, POSTERIZE, OPAQUE,
 * THRESHOLD, ERODE, DILATE) to either the display window or, when aImg is
 * given, to that image's pixels. `param` tunes the filter where applicable;
 * validation failures are thrown as strings.
 */
p.filter = function(kind, param, aImg){
  var img, col, lum, i;
  if (arguments.length === 3) {
    // Filter the supplied image in place.
    aImg.loadPixels();
    img = aImg;
  } else {
    // No image given: filter the display window itself.
    p.loadPixels();
    img = p;
  }
  if (param === undef) {
    param = null;
  }
  if (img.isRemote) { // Remote images cannot access imageData
    throw "Image is loaded remotely. Cannot filter image.";
  }
  // begin filter process
  var imglen = img.pixels.getLength();
  switch (kind) {
  case PConstants.BLUR:
    var radius = param || 1; // if no param specified, use 1 (default for p5)
    blurARGB(radius, img);
    break;
  case PConstants.GRAY:
    if (img.format === PConstants.ALPHA) { //trouble
      // for an alpha image, convert it to an opaque grayscale
      for (i = 0; i < imglen; i++) {
        col = 255 - img.pixels.getPixel(i);
        img.pixels.setPixel(i,(0xff000000 | (col << 16) | (col << 8) | col));
      }
      img.format = PConstants.RGB; //trouble
    } else {
      // 77/151/28 are 0.30/0.59/0.11 luminance weights scaled by 256.
      for (i = 0; i < imglen; i++) {
        col = img.pixels.getPixel(i);
        lum = (77*(col>>16&0xff) + 151*(col>>8&0xff) + 28*(col&0xff))>>8;
        img.pixels.setPixel(i,((col & PConstants.ALPHA_MASK) | lum<<16 | lum<<8 | lum));
      }
    }
    break;
  case PConstants.INVERT:
    // Flip RGB only; alpha is left untouched.
    for (i = 0; i < imglen; i++) {
      img.pixels.setPixel(i, (img.pixels.getPixel(i) ^ 0xffffff));
    }
    break;
  case PConstants.POSTERIZE:
    if (param === null) {
      throw "Use filter(POSTERIZE, int levels) instead of filter(POSTERIZE)";
    }
    var levels = p.floor(param);
    if ((levels < 2) || (levels > 255)) {
      throw "Levels must be between 2 and 255 for filter(POSTERIZE, levels)";
    }
    var levels1 = levels - 1;
    // Quantize each channel to `levels` steps, then rescale back to 0..255.
    for (i = 0; i < imglen; i++) {
      var rlevel = (img.pixels.getPixel(i) >> 16) & 0xff;
      var glevel = (img.pixels.getPixel(i) >> 8) & 0xff;
      var blevel = img.pixels.getPixel(i) & 0xff;
      rlevel = (((rlevel * levels) >> 8) * 255) / levels1;
      glevel = (((glevel * levels) >> 8) * 255) / levels1;
      blevel = (((blevel * levels) >> 8) * 255) / levels1;
      img.pixels.setPixel(i, ((0xff000000 & img.pixels.getPixel(i)) | (rlevel << 16) | (glevel << 8) | blevel));
    }
    break;
  case PConstants.OPAQUE:
    // Force full alpha on every pixel.
    for (i = 0; i < imglen; i++) {
      img.pixels.setPixel(i, (img.pixels.getPixel(i) | 0xff000000));
    }
    img.format = PConstants.RGB; //trouble
    break;
  case PConstants.THRESHOLD:
    if (param === null) {
      param = 0.5;
    }
    if ((param < 0) || (param > 1)) {
      throw "Level must be between 0 and 1 for filter(THRESHOLD, level)";
    }
    var thresh = p.floor(param * 255);
    // A pixel goes white when its brightest channel reaches the threshold.
    for (i = 0; i < imglen; i++) {
      var max = p.max((img.pixels.getPixel(i) & PConstants.RED_MASK) >> 16, p.max((img.pixels.getPixel(i) & PConstants.GREEN_MASK) >> 8, (img.pixels.getPixel(i) & PConstants.BLUE_MASK)));
      img.pixels.setPixel(i, ((img.pixels.getPixel(i) & PConstants.ALPHA_MASK) | ((max < thresh) ? 0x000000 : 0xffffff)));
    }
    break;
  case PConstants.ERODE:
    dilate(true, img);
    break;
  case PConstants.DILATE:
    dilate(false, img);
    break;
  }
  // Write the modified pixels back to the canvas/image.
  img.updatePixels();
};
// shared variables for blit_resize(), filter_new_scanline(), filter_bilinear(), filter()
// change this in the future to not be exposed to p
p.shared = {
  // bilinear sampling fractions along u/v and their complements
  fracU: 0,
  ifU: 0,
  fracV: 0,
  ifV: 0,
  // the two source columns (u1/u2) and row offsets (v1/v2) being sampled
  u1: 0,
  u2: 0,
  v1: 0,
  v2: 0,
  // current fixed-point source position
  sX: 0,
  sY: 0,
  // source image width, and width-1/height-1 clamps
  iw: 0,
  iw1: 0,
  ih1: 0,
  // per-corner interpolation weights (upper/lower, left/right)
  ul: 0,
  ll: 0,
  ur: 0,
  lr: 0,
  // the four corner colors being interpolated
  cUL: 0,
  cLL: 0,
  cUR: 0,
  cLR: 0,
  // fixed-point offsets of the source rectangle
  srcXOffset: 0,
  srcYOffset: 0,
  // interpolated output channels
  r: 0,
  g: 0,
  b: 0,
  a: 0,
  srcBuffer: null,
  // cached blur kernel built by buildBlurKernel()
  blurRadius: 0,
  blurKernelSize: 0,
  blurKernel: null
};
// Reports whether the destination rectangle, clipped against the source
// rectangle on each axis, still has a positive area. Coordinates are
// inclusive corners, hence the +1 when computing extents.
p.intersect = function(sx1, sy1, sx2, sy2, dx1, dy1, dx2, dy2) {
  var srcW = sx2 - sx1 + 1;
  var srcH = sy2 - sy1 + 1;
  var destW = dx2 - dx1 + 1;
  var destH = dy2 - dy1 + 1;
  // Clip horizontally.
  if (dx1 < sx1) {
    destW += dx1 - sx1;
    if (destW > srcW) {
      destW = srcW;
    }
  } else if (destW > srcW + sx1 - dx1) {
    destW = srcW + sx1 - dx1;
  }
  // Clip vertically.
  if (dy1 < sy1) {
    destH += dy1 - sy1;
    if (destH > srcH) {
      destH = srcH;
    }
  } else if (destH > srcH + sy1 - dy1) {
    destH = srcH + sy1 - dy1;
  }
  return destW > 0 && destH > 0;
};
// Lookup table mapping Processing blend-mode constants to their per-pixel
// implementations in p.modes; consulted once per blit in blit_resize().
var blendFuncs = {};
blendFuncs[PConstants.BLEND] = p.modes.blend;
blendFuncs[PConstants.ADD] = p.modes.add;
blendFuncs[PConstants.SUBTRACT] = p.modes.subtract;
blendFuncs[PConstants.LIGHTEST] = p.modes.lightest;
blendFuncs[PConstants.DARKEST] = p.modes.darkest;
blendFuncs[PConstants.REPLACE] = p.modes.replace;
blendFuncs[PConstants.DIFFERENCE] = p.modes.difference;
blendFuncs[PConstants.EXCLUSION] = p.modes.exclusion;
blendFuncs[PConstants.MULTIPLY] = p.modes.multiply;
blendFuncs[PConstants.SCREEN] = p.modes.screen;
blendFuncs[PConstants.OVERLAY] = p.modes.overlay;
blendFuncs[PConstants.HARD_LIGHT] = p.modes.hard_light;
blendFuncs[PConstants.SOFT_LIGHT] = p.modes.soft_light;
blendFuncs[PConstants.DODGE] = p.modes.dodge;
blendFuncs[PConstants.BURN] = p.modes.burn;
// Core of blend()/copy(): scales the source rectangle of img onto the
// destination pixel buffer using fixed-point bilinear sampling, combining
// each sampled color with the existing destination pixel through the blend
// function registered for `mode` in blendFuncs.
p.blit_resize = function(img, srcX1, srcY1, srcX2, srcY2, destPixels,
                         screenW, screenH, destX1, destY1, destX2, destY2, mode) {
  var x, y;
  // Clamp the source rectangle to the image bounds.
  if (srcX1 < 0) {
    srcX1 = 0;
  }
  if (srcY1 < 0) {
    srcY1 = 0;
  }
  if (srcX2 >= img.width) {
    srcX2 = img.width - 1;
  }
  if (srcY2 >= img.height) {
    srcY2 = img.height - 1;
  }
  var srcW = srcX2 - srcX1;
  var srcH = srcY2 - srcY1;
  var destW = destX2 - destX1;
  var destH = destY2 - destY1;
  // Nothing to do if either rectangle is empty or entirely off-screen.
  if (destW <= 0 || destH <= 0 || srcW <= 0 || srcH <= 0 || destX1 >= screenW ||
      destY1 >= screenH || srcX1 >= img.width || srcY1 >= img.height) {
    return;
  }
  // Fixed-point source step per destination pixel (PRECISIONF scale).
  var dx = Math.floor(srcW / destW * PConstants.PRECISIONF);
  var dy = Math.floor(srcH / destH * PConstants.PRECISIONF);
  var pshared = p.shared;
  pshared.srcXOffset = Math.floor(destX1 < 0 ? -destX1 * dx : srcX1 * PConstants.PRECISIONF);
  pshared.srcYOffset = Math.floor(destY1 < 0 ? -destY1 * dy : srcY1 * PConstants.PRECISIONF);
  // Clip the destination rectangle to the screen.
  if (destX1 < 0) {
    destW += destX1;
    destX1 = 0;
  }
  if (destY1 < 0) {
    destH += destY1;
    destY1 = 0;
  }
  destW = Math.min(destW, screenW - destX1);
  destH = Math.min(destH, screenH - destY1);
  var destOffset = destY1 * screenW + destX1;
  var destColor;
  pshared.srcBuffer = img.imageData.data;
  pshared.iw = img.width;
  pshared.iw1 = img.width - 1;
  pshared.ih1 = img.height - 1;
  // cache for speed
  var filterBilinear = p.filter_bilinear,
      filterNewScanline = p.filter_new_scanline,
      blendFunc = blendFuncs[mode],
      blendedColor,
      idx,
      cULoffset,
      cURoffset,
      cLLoffset,
      cLRoffset,
      ALPHA_MASK = PConstants.ALPHA_MASK,
      RED_MASK = PConstants.RED_MASK,
      GREEN_MASK = PConstants.GREEN_MASK,
      BLUE_MASK = PConstants.BLUE_MASK,
      PREC_MAXVAL = PConstants.PREC_MAXVAL,
      PRECISIONB = PConstants.PRECISIONB,
      PREC_RED_SHIFT = PConstants.PREC_RED_SHIFT,
      PREC_ALPHA_SHIFT = PConstants.PREC_ALPHA_SHIFT,
      srcBuffer = pshared.srcBuffer,
      min = Math.min;
  for (y = 0; y < destH; y++) {
    // Start-of-scanline state: vertical fraction and the two source rows.
    pshared.sX = pshared.srcXOffset;
    pshared.fracV = pshared.srcYOffset & PREC_MAXVAL;
    pshared.ifV = PREC_MAXVAL - pshared.fracV;
    pshared.v1 = (pshared.srcYOffset >> PRECISIONB) * pshared.iw;
    pshared.v2 = min((pshared.srcYOffset >> PRECISIONB) + 1, pshared.ih1) * pshared.iw;
    for (x = 0; x < destW; x++) {
      idx = (destOffset + x) * 4;
      // Repack the destination RGBA bytes into one 0xAARRGGBB int.
      destColor = (destPixels[idx + 3] << 24) &
        ALPHA_MASK | (destPixels[idx] << 16) &
        RED_MASK | (destPixels[idx + 1] << 8) &
        GREEN_MASK | destPixels[idx + 2] & BLUE_MASK;
      // Horizontal fraction and the four corner weights.
      pshared.fracU = pshared.sX & PREC_MAXVAL;
      pshared.ifU = PREC_MAXVAL - pshared.fracU;
      pshared.ul = (pshared.ifU * pshared.ifV) >> PRECISIONB;
      pshared.ll = (pshared.ifU * pshared.fracV) >> PRECISIONB;
      pshared.ur = (pshared.fracU * pshared.ifV) >> PRECISIONB;
      pshared.lr = (pshared.fracU * pshared.fracV) >> PRECISIONB;
      pshared.u1 = (pshared.sX >> PRECISIONB);
      pshared.u2 = min(pshared.u1 + 1, pshared.iw1);
      // Byte offsets of the four corner pixels in the source buffer.
      cULoffset = (pshared.v1 + pshared.u1) * 4;
      cURoffset = (pshared.v1 + pshared.u2) * 4;
      cLLoffset = (pshared.v2 + pshared.u1) * 4;
      cLRoffset = (pshared.v2 + pshared.u2) * 4;
      pshared.cUL = (srcBuffer[cULoffset + 3] << 24) &
        ALPHA_MASK | (srcBuffer[cULoffset] << 16) &
        RED_MASK | (srcBuffer[cULoffset + 1] << 8) &
        GREEN_MASK | srcBuffer[cULoffset + 2] & BLUE_MASK;
      pshared.cUR = (srcBuffer[cURoffset + 3] << 24) &
        ALPHA_MASK | (srcBuffer[cURoffset] << 16) &
        RED_MASK | (srcBuffer[cURoffset + 1] << 8) &
        GREEN_MASK | srcBuffer[cURoffset + 2] & BLUE_MASK;
      pshared.cLL = (srcBuffer[cLLoffset + 3] << 24) &
        ALPHA_MASK | (srcBuffer[cLLoffset] << 16) &
        RED_MASK | (srcBuffer[cLLoffset + 1] << 8) &
        GREEN_MASK | srcBuffer[cLLoffset + 2] & BLUE_MASK;
      pshared.cLR = (srcBuffer[cLRoffset + 3] << 24) &
        ALPHA_MASK | (srcBuffer[cLRoffset] << 16) &
        RED_MASK | (srcBuffer[cLRoffset + 1] << 8) &
        GREEN_MASK | srcBuffer[cLRoffset + 2] & BLUE_MASK;
      // Bilinear interpolation of each channel in its masked position.
      pshared.r = ((pshared.ul * ((pshared.cUL & RED_MASK) >> 16) +
        pshared.ll * ((pshared.cLL & RED_MASK) >> 16) +
        pshared.ur * ((pshared.cUR & RED_MASK) >> 16) +
        pshared.lr * ((pshared.cLR & RED_MASK) >> 16)) << PREC_RED_SHIFT) & RED_MASK;
      pshared.g = ((pshared.ul * (pshared.cUL & GREEN_MASK) +
        pshared.ll * (pshared.cLL & GREEN_MASK) +
        pshared.ur * (pshared.cUR & GREEN_MASK) +
        pshared.lr * (pshared.cLR & GREEN_MASK)) >>> PRECISIONB) & GREEN_MASK;
      pshared.b = (pshared.ul * (pshared.cUL & BLUE_MASK) +
        pshared.ll * (pshared.cLL & BLUE_MASK) +
        pshared.ur * (pshared.cUR & BLUE_MASK) +
        pshared.lr * (pshared.cLR & BLUE_MASK)) >>> PRECISIONB;
      pshared.a = ((pshared.ul * ((pshared.cUL & ALPHA_MASK) >>> 24) +
        pshared.ll * ((pshared.cLL & ALPHA_MASK) >>> 24) +
        pshared.ur * ((pshared.cUR & ALPHA_MASK) >>> 24) +
        pshared.lr * ((pshared.cLR & ALPHA_MASK) >>> 24)) << PREC_ALPHA_SHIFT) & ALPHA_MASK;
      // Blend with the destination and unpack back into RGBA bytes.
      blendedColor = blendFunc(destColor, (pshared.a | pshared.r | pshared.g | pshared.b));
      destPixels[idx] = (blendedColor & RED_MASK) >>> 16;
      destPixels[idx + 1] = (blendedColor & GREEN_MASK) >>> 8;
      destPixels[idx + 2] = (blendedColor & BLUE_MASK);
      destPixels[idx + 3] = (blendedColor & ALPHA_MASK) >>> 24;
      pshared.sX += dx;
    }
    destOffset += screenW;
    pshared.srcYOffset += dy;
  }
};
////////////////////////////////////////////////////////////////////////////
// Font handling
////////////////////////////////////////////////////////////////////////////
/**
* loadFont() Loads a font into a variable of type PFont.
*
* @param {String} name filename of the font to load
* @param {int|float} size optional font size (used internally)
*
* @returns {PFont} new PFont object
*
* @see #PFont
* @see #textFont
* @see #text
* @see #createFont
*/
p.loadFont = function(name, size) {
  if (name === undef) {
    throw("font name required in loadFont.");
  }
  // Regular (non-SVG) fonts are served from the PFont cache; the size
  // defaults to the current font's size when omitted.
  if (name.indexOf(".svg") === -1) {
    return PFont.get(name, size === undef ? curTextFont.size : size);
  }
  // SVG glyph font: parse the Batik SVG glyph table and wrap it in a
  // PFont-like object whose metrics come from the font file itself.
  var glyphFont = p.loadGlyphs(name);
  return {
    name: name,
    css: '12px sans-serif',
    glyph: true,
    units_per_em: glyphFont.units_per_em,
    horiz_adv_x: 1 / glyphFont.units_per_em * glyphFont.horiz_adv_x,
    ascent: glyphFont.ascent,
    descent: glyphFont.descent,
    // Width of str expressed in em units, summed glyph by glyph.
    width: function(str) {
      var total = 0;
      for (var i = 0, len = str.length; i < len; i++) {
        try {
          total += parseFloat(p.glyphLook(p.glyphTable[name], str[i]).horiz_adv_x);
        } catch(e) {
          Processing.debug(e);
        }
      }
      return total / p.glyphTable[name].units_per_em;
    }
  };
};
/**
* createFont() Loads a font into a variable of type PFont.
* Smooth and charset are ignored in Processing.js.
*
* @param {String} name filename of the font to load
* @param {int|float} size font size in pixels
* @param {boolean} smooth not used in Processing.js
* @param {char[]} charset not used in Processing.js
*
* @returns {PFont} new PFont object
*
* @see #PFont
* @see #textFont
* @see #text
* @see #loadFont
*/
p.createFont = function(name, size) {
  // because Processing.js only deals with real fonts,
  // createFont is simply a wrapper for loadFont/2
  // (the smooth/charset arguments accepted by Processing are ignored)
  return p.loadFont(name, size);
};
/**
* textFont() Sets the current font.
*
* @param {PFont} pfont the PFont to load as current text font
* @param {int|float} size optional font size in pixels
*
* @see #createFont
* @see #loadFont
* @see #PFont
* @see #text
*/
p.textFont = function(pfont, size) {
  // When a size is given, fetch a correctly sized instance from the
  // PFont cache — unless this is an SVG glyph font, which is never cached.
  if (size !== undef) {
    if (!pfont.glyph) {
      pfont = PFont.get(pfont.name, size);
    }
    curTextSize = size;
  }
  // Make the font current and refresh all cached metrics.
  curTextFont = pfont;
  curFontName = pfont.name;
  curTextAscent = pfont.ascent;
  curTextDescent = pfont.descent;
  curTextLeading = pfont.leading;
  // Push the font onto the rendering context.
  drawing.$ensureContext().font = pfont.css;
};
/**
* textSize() Sets the current font size in pixels.
*
* @param {int|float} size font size in pixels
*
* @see #textFont
* @see #loadFont
* @see #PFont
* @see #text
*/
p.textSize = function(size) {
  if (size === curTextSize) {
    return;
  }
  // Quantize to the nearest tenth of a pixel so that we don't explode
  // the font cache with near-identical sizes.
  var rounded = Math.round(10 * size) / 10;
  curTextFont = PFont.get(curFontName, rounded);
  curTextSize = rounded;
  // Recache metrics for the newly sized font.
  curTextAscent = curTextFont.ascent;
  curTextDescent = curTextFont.descent;
  curTextLeading = curTextFont.leading;
  drawing.$ensureContext().font = curTextFont.css;
};
/**
* textAscent() returns the maximum height a character extends above the baseline of the
* current font at its current size, in pixels.
*
* @returns {float} height of the current font above the baseline, at its current size, in pixels
*
* @see #textDescent
*/
p.textAscent = function() {
  // The metric is cached whenever the current font or size changes
  // (see textFont/textSize), so this is a plain read.
  return curTextAscent;
};
/**
* textDescent() returns the maximum depth a character will protrude below the baseline of
* the current font at its current size, in pixels.
*
* @returns {float} depth of the current font below the baseline, at its current size, in pixels
*
* @see #textAscent
*/
p.textDescent = function() {
  // The metric is cached whenever the current font or size changes
  // (see textFont/textSize), so this is a plain read.
  return curTextDescent;
};
/**
* textLeading() Sets the current font's leading, which is the distance
* from baseline to baseline over consecutive lines, with additional vertical
* spacing taken into account. Usually this value is 1.2 or 1.25 times the
* textsize, but this value can be changed to effect vertically compressed
* or stretched text.
*
* @param {int|float} leading the desired baseline-to-baseline size in pixels
*/
p.textLeading = function(leading) {
  // Stores the baseline-to-baseline distance used when laying out
  // multi-line text.
  curTextLeading = leading;
};
/**
* textAlign() Sets the current alignment for drawing text.
*
* @param {int} ALIGN Horizontal alignment, either LEFT, CENTER, or RIGHT
* @param {int} YALIGN optional vertical alignment, either TOP, BOTTOM, CENTER, or BASELINE
*
* @see #loadFont
* @see #PFont
* @see #text
*/
p.textAlign = function(xalign, yalign) {
  horizontalTextAlignment = xalign;
  // Vertical alignment defaults to BASELINE when none is supplied.
  verticalTextAlignment = yalign ? yalign : PConstants.BASELINE;
};
/**
* toP5String converts things with arbitrary data type into
* string values, for text rendering.
*
* @param {any} any object that can be converted into a string
*
* @return {String} the string representation of the input
*/
/**
 * toP5String converts things with arbitrary data type into
 * string values, for text rendering.
 *
 * @param {any} obj object that can be converted into a string
 *
 * @return {String} the string representation of the input
 */
function toP5String(obj) {
  // Boxed String objects pass through untouched.
  if (obj instanceof String) {
    return obj;
  }
  if (typeof obj === 'number') {
    // Integers print verbatim; other numbers are formatted with three
    // decimal places via nf(), matching Processing's float printing.
    var isInt = obj === (0 | obj);
    return isInt ? obj.toString() : p.nf(obj, 0, 3);
  }
  // null/undefined render as the empty string.
  if (obj === null || obj === undef) {
    return "";
  }
  return obj.toString();
}
/**
* textWidth() Calculates and returns the width of any character or text string in pixels.
*
* @param {char|String} str char or String to be measured
*
* @return {float} width of char or String in pixels
*
* @see #loadFont
* @see #PFont
* @see #text
* @see #textFont
*/
// The width of multi-line text is the width of its widest line.
Drawing2D.prototype.textWidth = function(str) {
  curContext.font = curTextFont.css;
  var widest = 0;
  var lines = toP5String(str).split(/\r?\n/g);
  for (var i = 0, n = lines.length; i < n; ++i) {
    widest = Math.max(widest, curTextFont.measureTextWidth(lines[i]));
  }
  return widest | 0;
};
// 3D variant: the WebGL context cannot measure text directly, so we
// measure on a lazily created scratch 2D canvas.
Drawing3D.prototype.textWidth = function(str) {
  if (textcanvas === undef) {
    textcanvas = document.createElement("canvas");
  }
  var measureContext = textcanvas.getContext("2d");
  measureContext.font = curTextFont.css;
  var widest = 0;
  var lines = toP5String(str).split(/\r?\n/g);
  for (var i = 0, n = lines.length; i < n; ++i) {
    widest = Math.max(widest, measureContext.measureText(lines[i]).width);
  }
  return widest | 0;
};
// A lookup table for characters that can not be referenced by Object
// Maps characters that can not be referenced directly as object
// properties (digits and punctuation) to the glyph names used in
// Batik SVG fonts. Built once instead of re-evaluating a 43-case
// switch on every lookup.
var glyphNameLookup = {
  "1": "one", "2": "two", "3": "three", "4": "four", "5": "five",
  "6": "six", "7": "seven", "8": "eight", "9": "nine", "0": "zero",
  " ": "space", "$": "dollar", "!": "exclam", '"': "quotedbl",
  "#": "numbersign", "%": "percent", "&": "ampersand", "'": "quotesingle",
  "(": "parenleft", ")": "parenright", "*": "asterisk", "+": "plus",
  ",": "comma", "-": "hyphen", ".": "period", "/": "slash",
  "_": "underscore", ":": "colon", ";": "semicolon", "<": "less",
  "=": "equal", ">": "greater", "?": "question", "@": "at",
  "[": "bracketleft", "\\": "backslash", "]": "bracketright",
  "^": "asciicircum", "`": "grave", "{": "braceleft", "|": "bar",
  "}": "braceright", "~": "asciitilde"
};
/**
 * Looks up the glyph object for a character in a parsed SVG font.
 * Characters that cannot be used as identifiers are translated to
 * their Batik glyph names first; all others are accessed directly.
 * Errors (e.g. a missing glyph) are logged and yield undefined.
 *
 * @param {Object} font parsed glyph table (see loadGlyphs)
 * @param {String} chr single character to look up
 *
 * @returns {Object|undefined} the glyph entry, or undefined on failure
 */
p.glyphLook = function(font, chr) {
  try {
    var glyphName = glyphNameLookup[chr];
    // If the character is not 'special', access it by object reference.
    return glyphName === undef ? font[chr] : font[glyphName];
  } catch(e) {
    Processing.debug(e);
  }
};
// Print some text to the Canvas
// Print one line of text to the canvas, honoring the horizontal
// alignment passed in (LEFT/CENTER/RIGHT).
Drawing2D.prototype.text$line = function(str, x, y, z, align) {
  var xOffset = 0;
  if (!curTextFont.glyph) {
    // Standard Canvas font: delegate to fillText.
    if (str && ("fillText" in curContext)) {
      if (isFillDirty) {
        curContext.fillStyle = p.color.toString(currentFillColor);
        isFillDirty = false;
      }
      // Shift left for RIGHT/CENTER alignment.
      if (align === PConstants.RIGHT) {
        xOffset = -curTextFont.measureTextWidth(str);
      } else if (align === PConstants.CENTER) {
        xOffset = -curTextFont.measureTextWidth(str) / 2;
      }
      curContext.fillText(str, x + xOffset, y);
    }
  } else {
    // Batik SVG font: replay each glyph's stored canvas path.
    var font = p.glyphTable[curFontName];
    saveContext();
    curContext.translate(x, y + curTextSize);
    // NOTE(review): xOffset is computed here but never applied to the
    // translation, so alignment has no visible effect for SVG glyph
    // fonts — preserved as-is; confirm before changing.
    if (align === PConstants.RIGHT) {
      xOffset = -font.width(str);
    } else if (align === PConstants.CENTER) {
      xOffset = -font.width(str) / 2;
    }
    // Scale from font units to pixels (same arithmetic order as the
    // original to keep floating-point results identical).
    var fontScale = 1 / font.units_per_em * curTextSize;
    curContext.scale(fontScale, fontScale);
    for (var i = 0, len = str.length; i < len; i++) {
      // Test character against glyph table
      try {
        p.glyphLook(font, str[i]).draw();
      } catch(e) {
        Processing.debug(e);
      }
    }
    restoreContext();
  }
};
// Draws one line of text in the 3D (WebGL) renderer by rasterizing the
// string onto a scratch 2D canvas, then blitting that canvas as a
// textured quad through the 2D shader program.
Drawing3D.prototype.text$line = function(str, x, y, z, align) {
  // handle case for 3d text: lazily create the scratch 2D canvas
  if (textcanvas === undef) {
    textcanvas = document.createElement("canvas");
  }
  // Temporarily point curContext at the scratch canvas so that the 2D
  // text$line implementation below paints into it.
  var oldContext = curContext;
  curContext = textcanvas.getContext("2d");
  curContext.font = curTextFont.css;
  var textWidth = curTextFont.measureTextWidth(str);
  // Resizing the canvas resets its state, so font/baseline are
  // re-applied below.
  textcanvas.width = textWidth;
  textcanvas.height = curTextSize;
  curContext = textcanvas.getContext("2d"); // refreshes curContext
  curContext.font = curTextFont.css;
  curContext.textBaseline="top";
  // paint on 2D canvas
  Drawing2D.prototype.text$line(str,0,0,0,PConstants.LEFT);
  // use it as a texture
  var aspect = textcanvas.width/textcanvas.height;
  curContext = oldContext;
  curContext.bindTexture(curContext.TEXTURE_2D, textTex);
  curContext.texImage2D(curContext.TEXTURE_2D, 0, curContext.RGBA, curContext.RGBA, curContext.UNSIGNED_BYTE, textcanvas);
  // LINEAR filtering + CLAMP_TO_EDGE works for non-power-of-two sizes.
  curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_MAG_FILTER, curContext.LINEAR);
  curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_MIN_FILTER, curContext.LINEAR);
  curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_WRAP_T, curContext.CLAMP_TO_EDGE);
  curContext.texParameteri(curContext.TEXTURE_2D, curContext.TEXTURE_WRAP_S, curContext.CLAMP_TO_EDGE);
  // If we don't have a power of two texture, we can't mipmap it.
  // curContext.generateMipmap(curContext.TEXTURE_2D);
  // horizontal offset/alignment
  var xOffset = 0;
  if (align === PConstants.RIGHT) {
    xOffset = -textWidth;
  } else if(align === PConstants.CENTER) {
    xOffset = -textWidth/2;
  }
  // Position/scale the quad in model space: its height tracks
  // curTextSize and its width the canvas aspect ratio.
  var model = new PMatrix3D();
  var scalefactor = curTextSize * 0.5;
  model.translate(x+xOffset-scalefactor/2, y-scalefactor, z);
  model.scale(-aspect*scalefactor, -scalefactor, scalefactor);
  model.translate(-1, -1, -1);
  model.transpose();
  // Flip Y to map canvas coordinates onto the GL view.
  var view = new PMatrix3D();
  view.scale(1, -1, 1);
  view.apply(modelView.array());
  view.transpose();
  // Render the quad with the 2D program, sampling the text texture.
  curContext.useProgram(programObject2D);
  vertexAttribPointer("vertex2d", programObject2D, "Vertex", 3, textBuffer);
  vertexAttribPointer("aTextureCoord2d", programObject2D, "aTextureCoord", 2, textureBuffer);
  uniformi("uSampler2d", programObject2D, "uSampler", [0]);
  uniformi("picktype2d", programObject2D, "picktype", 1);
  uniformMatrix("model2d", programObject2D, "model", false, model.array());
  uniformMatrix("view2d", programObject2D, "view", false, view.array());
  uniformf("color2d", programObject2D, "color", fillStyle);
  curContext.bindBuffer(curContext.ELEMENT_ARRAY_BUFFER, indexBuffer);
  curContext.drawElements(curContext.TRIANGLES, 6, curContext.UNSIGNED_SHORT, 0);
};
/**
* unbounded text function (z is an optional argument)
*/
function text$4(str, x, y, z) {
var lines, linesCount;
if(str.indexOf('\n') < 0) {
lines = [str];
linesCount = 1;
} else {
lines = str.split(/\r?\n/g);
linesCount = lines.length;
}
// handle text line-by-line
var yOffset = 0;
if(verticalTextAlignment === PConstants.TOP) {
yOffset = curTextAscent + curTextDescent;
} else if(verticalTextAlignment === PConstants.CENTER) {
yOffset = curTextAscent/2 - (linesCount-1)*curTextLeading/2;
} else if(verticalTextAlignment === PConstants.BOTTOM) {
yOffset = -(curTextDescent + (linesCount-1)*curTextLeading);
}
for(var i=0;i height) {
return;
}
var spaceMark = -1;
var start = 0;
var lineWidth = 0;
var drawCommands = [];
// run through text, character-by-character
for (var charPos=0, len=str.length; charPos < len; charPos++)
{
var currentChar = str[charPos];
var spaceChar = (currentChar === " ");
var letterWidth = curTextFont.measureTextWidth(currentChar);
// if we aren't looking at a newline, and the text still fits, keep processing
if (currentChar !== "\n" && (lineWidth + letterWidth <= width)) {
if (spaceChar) { spaceMark = charPos; }
lineWidth += letterWidth;
}
// if we're looking at a newline, or the text no longer fits, push the section that fit into the drawcommand list
else
{
if (spaceMark + 1 === start) {
if(charPos>0) {
// Whole line without spaces so far.
spaceMark = charPos;
} else {
// 'fail', because the line can't even fit the first character
return;
}
}
if (currentChar === "\n") {
drawCommands.push({text:str.substring(start, charPos), width: lineWidth});
start = charPos + 1;
} else {
// current is not a newline, which means the line doesn't fit in box. push text.
// In Processing 1.5.1, the space is also pushed, so we push up to spaceMark+1,
// rather than up to spaceMark, as was the case for Processing 1.5 and earlier.
drawCommands.push({text:str.substring(start, spaceMark+1), width: lineWidth});
start = spaceMark + 1;
}
// newline + return
lineWidth = 0;
charPos = start - 1;
}
}
// push the remaining text
if (start < len) {
drawCommands.push({text:str.substring(start), width: lineWidth});
}
// resolve horizontal alignment
var xOffset = 1,
yOffset = curTextAscent;
if (horizontalTextAlignment === PConstants.CENTER) {
xOffset = width/2;
} else if (horizontalTextAlignment === PConstants.RIGHT) {
xOffset = width;
}
// resolve vertical alignment
var linesCount = drawCommands.length,
visibleLines = Math.min(linesCount, Math.floor(height/curTextLeading));
if(verticalTextAlignment === PConstants.TOP) {
yOffset = curTextAscent + curTextDescent;
} else if(verticalTextAlignment === PConstants.CENTER) {
yOffset = (height/2) - curTextLeading * (visibleLines/2 - 1);
} else if(verticalTextAlignment === PConstants.BOTTOM) {
yOffset = curTextDescent + curTextLeading;
}
var command,
drawCommand,
leading;
for (command = 0; command < linesCount; command++) {
leading = command * curTextLeading;
// stop if not enough space for one more line draw
if (yOffset + leading > height - curTextDescent) {
break;
}
drawCommand = drawCommands[command];
drawing.text$line(drawCommand.text, x + xOffset, y + yOffset + leading, z, horizontalTextAlignment);
}
}
/**
* text() Draws text to the screen.
*
* @param {String|char|int|float} data the alphanumeric symbols to be displayed
* @param {int|float} x x-coordinate of text
* @param {int|float} y y-coordinate of text
* @param {int|float} z optional z-coordinate of text
* @param {String} stringdata optional letters to be displayed
* @param {int|float} width optional width of text box
* @param {int|float} height optional height of text box
*
* @see #textAlign
* @see #textMode
* @see #loadFont
* @see #PFont
* @see #textFont
*/
p.text = function() {
  //XXX(jeresig): Fix font constantly resetting
  if (curContext.font !== curTextFont.css) {
    curContext.font = curTextFont.css;
  }
  // textMode(SHAPE) needs beginRaw support, which doesn't exist yet.
  if (textMode === PConstants.SHAPE) {
    return;
  }
  // Dispatch on arity: 3/4 args draw unbounded text, 5/6 draw into a box.
  switch (arguments.length) {
  case 3: // text(str, x, y)
    text$4(toP5String(arguments[0]), arguments[1], arguments[2], 0);
    break;
  case 4: // text(str, x, y, z)
    text$4(toP5String(arguments[0]), arguments[1], arguments[2], arguments[3]);
    break;
  case 5: // text(str, x, y, width, height)
    text$6(toP5String(arguments[0]), arguments[1], arguments[2], arguments[3], arguments[4], 0);
    break;
  case 6: // text(str, x, y, width, height, z)
    text$6(toP5String(arguments[0]), arguments[1], arguments[2], arguments[3], arguments[4], arguments[5]);
    break;
  }
};
/**
* Sets the way text draws to the screen. In the default configuration (the MODEL mode), it's possible to rotate,
* scale, and place letters in two and three dimensional space.
* Changing to SCREEN mode draws letters
* directly to the front of the window and greatly increases rendering quality and speed when used with the P2D and
* P3D renderers. textMode(SCREEN) with OPENGL and JAVA2D (the default) renderers will generally be slower, though
* pixel accurate with P2D and P3D. With textMode(SCREEN), the letters draw at the actual size of the font (in pixels)
* and therefore calls to textSize() will not affect the size of the letters. To create a font at the size you
* desire, use the "Create font..." option in the Tools menu, or use the createFont() function. When using textMode(SCREEN),
* any z-coordinate passed to a text() command will be ignored, because your computer screen is...flat!
*
* @param {int} MODE Either MODEL, SCREEN or SHAPE (not yet supported)
*
* @see loadFont
* @see PFont
* @see text
* @see textFont
* @see createFont
*/
p.textMode = function(mode){
  // Stores the mode; SHAPE is checked for (and skipped) in text().
  textMode = mode;
};
// Load Batik SVG Fonts and parse to pre-def objects for quick rendering
/**
 * Loads a Batik SVG font file and parses every glyph into a table of
 * pre-compiled canvas drawing commands for fast text rendering.
 *
 * @param {String} url location of the SVG font file
 *
 * @returns {Object} the populated p.glyphTable[url] entry
 */
p.loadGlyphs = function(url) {
  // Shared parse state, mutated by the nested helpers below.
  var x, y, cx, cy, nx, ny, d, a, lastCom, lenC, horiz_adv_x, getXY = '[0-9\\-]+', path;
  // Return arrays of SVG commands and coords
  // get this to use p.matchAll() - will need to work around the lack of null return
  var regex = function(needle, hay) {
    var i = 0,
      results = [],
      latest, regexp = new RegExp(needle, "g");
    latest = results[i] = regexp.exec(hay);
    while (latest) {
      i++;
      latest = results[i] = regexp.exec(hay);
    }
    return results;
  };
  // Compiles one glyph's SVG path data ("d" attribute) into an object
  // whose draw() replays it as canvas commands.
  var buildPath = function(d) {
    var c = regex("[A-Za-z][0-9\\- ]+|Z", d);
    var beforePathDraw = function() {
      saveContext();
      return drawing.$ensureContext();
    };
    var afterPathDraw = function() {
      executeContextFill();
      executeContextStroke();
      restoreContext();
    };
    // Begin storing path object
    path = "return {draw:function(){var curContext=beforePathDraw();curContext.beginPath();";
    x = 0;
    y = 0;
    cx = 0;
    cy = 0;
    nx = 0;
    ny = 0;
    d = 0;
    a = 0;
    lastCom = "";
    // regex() stores a trailing null from the last failed exec, so the
    // final element is skipped.
    lenC = c.length - 1;
    // Loop through SVG commands translating to canvas equivalent functions in path object.
    // Note: SVG Y axis points down while the glyph coordinate system points up,
    // hence the negated Y values throughout.
    for (var j = 0; j < lenC; j++) {
      var com = c[j][0], xy = regex(getXY, com);
      switch (com[0]) {
      case "M": // absolute moveto
        x = parseFloat(xy[0][0]);
        y = parseFloat(xy[1][0]);
        path += "curContext.moveTo(" + x + "," + (-y) + ");";
        break;
      case "L": // absolute lineto
        x = parseFloat(xy[0][0]);
        y = parseFloat(xy[1][0]);
        path += "curContext.lineTo(" + x + "," + (-y) + ");";
        break;
      case "H": // horizontal lineto
        x = parseFloat(xy[0][0]);
        path += "curContext.lineTo(" + x + "," + (-y) + ");";
        break;
      case "V": // vertical lineto
        y = parseFloat(xy[0][0]);
        path += "curContext.lineTo(" + x + "," + (-y) + ");";
        break;
      case "T": // smooth quadratic curveto: reflect the previous control
        // point when the previous command was Q or T
        nx = parseFloat(xy[0][0]);
        ny = parseFloat(xy[1][0]);
        if (lastCom === "Q" || lastCom === "T") {
          d = Math.sqrt(Math.pow(x - cx, 2) + Math.pow(cy - y, 2));
          // XXX(jeresig)
          a = (p.angleMode === "degrees" ? 180 : Math.PI) + p.atan2(cx - x, cy - y);
          cx = x + p.sin(a) * d;
          cy = y + p.cos(a) * d;
        } else {
          cx = x;
          cy = y;
        }
        path += "curContext.quadraticCurveTo(" + cx + "," + (-cy) + "," + nx + "," + (-ny) + ");";
        x = nx;
        y = ny;
        break;
      case "Q": // quadratic curveto with explicit control point
        cx = parseFloat(xy[0][0]);
        cy = parseFloat(xy[1][0]);
        nx = parseFloat(xy[2][0]);
        ny = parseFloat(xy[3][0]);
        path += "curContext.quadraticCurveTo(" + cx + "," + (-cy) + "," + nx + "," + (-ny) + ");";
        x = nx;
        y = ny;
        break;
      case "Z": // closepath
        path += "curContext.closePath();";
        break;
      }
      lastCom = com[0];
    }
    path += "afterPathDraw();";
    // Advance the pen by the glyph's horizontal advance.
    path += "curContext.translate(" + horiz_adv_x + ",0);";
    path += "}}";
    return ((new Function("beforePathDraw", "afterPathDraw", path))(beforePathDraw, afterPathDraw));
  };
  // Parse SVG font-file into block of Canvas commands
  var parseSVGFont = function(svg) {
    // Store font attributes
    var font = svg.getElementsByTagName("font");
    p.glyphTable[url].horiz_adv_x = font[0].getAttribute("horiz-adv-x");
    var font_face = svg.getElementsByTagName("font-face")[0];
    p.glyphTable[url].units_per_em = parseFloat(font_face.getAttribute("units-per-em"));
    p.glyphTable[url].ascent = parseFloat(font_face.getAttribute("ascent"));
    p.glyphTable[url].descent = parseFloat(font_face.getAttribute("descent"));
    var glyph = svg.getElementsByTagName("glyph"),
      len = glyph.length;
    // Loop through each glyph in the SVG
    for (var i = 0; i < len; i++) {
      // Store attributes for this glyph; a glyph without its own
      // horiz-adv-x inherits the font-wide default.
      var unicode = glyph[i].getAttribute("unicode");
      var name = glyph[i].getAttribute("glyph-name");
      horiz_adv_x = glyph[i].getAttribute("horiz-adv-x");
      if (horiz_adv_x === null) {
        horiz_adv_x = p.glyphTable[url].horiz_adv_x;
      }
      d = glyph[i].getAttribute("d");
      // Split path commands in glyph
      if (d !== undef) {
        path = buildPath(d);
        // Store glyph data to table object
        p.glyphTable[url][name] = {
          name: name,
          unicode: unicode,
          horiz_adv_x: horiz_adv_x,
          draw: path.draw
        };
      }
    } // finished adding glyphs to table
  };
  // Load and parse Batik SVG font as XML into a Processing Glyph object
  var loadXML = function() {
    var xmlDoc;
    try {
      xmlDoc = document.implementation.createDocument("", "", null);
    }
    catch(e_fx_op) {
      Processing.debug(e_fx_op.message);
      return;
    }
    try {
      // Synchronous document.load path (older Gecko).
      xmlDoc.async = false;
      xmlDoc.load(url);
      parseSVGFont(xmlDoc.getElementsByTagName("svg")[0]);
    }
    catch(e_sf_ch) {
      // Google Chrome, Safari etc. fall back to a synchronous XHR.
      Processing.debug(e_sf_ch);
      try {
        var xmlhttp = new window.XMLHttpRequest();
        xmlhttp.open("GET", url, false);
        xmlhttp.send(null);
        parseSVGFont(xmlhttp.responseXML.documentElement);
      }
      catch(e) {
        // BUGFIX: log the XHR failure itself; previously this logged
        // e_sf_ch again, hiding the actual error.
        Processing.debug(e);
      }
    }
  };
  // Create a new object in glyphTable to store this font
  p.glyphTable[url] = {};
  // Begin loading the Batik SVG font...
  loadXML(url);
  // Return the loaded font for attribute grabbing
  return p.glyphTable[url];
};
/**
* Gets the sketch parameter value. The parameter can be defined as the canvas attribute with
* the "data-processing-" prefix or provided in the pjs directive (e.g. param-test="52").
* The function tries the canvas attributes, then the pjs directive content.
*
* @param {String} name The name of the param to read.
*
* @returns {String} The parameter value, or null if parameter is not defined.
*/
p.param = function(name) {
  // 1) the canvas attribute "data-processing-<name>"
  var dataAttribute = "data-processing-" + name;
  if (curElement.hasAttribute(dataAttribute)) {
    return curElement.getAttribute(dataAttribute);
  }
  // 2) child <param> elements of the canvas
  var children = curElement.childNodes;
  for (var i = 0, len = children.length; i < len; ++i) {
    var node = children.item(i);
    if (node.nodeType !== 1 || node.tagName.toLowerCase() !== "param") {
      continue;
    }
    if (node.getAttribute("name") === name) {
      return node.getAttribute("value");
    }
  }
  // 3) defaults supplied through the pjs directive
  if (curSketch.params.hasOwnProperty(name)) {
    return curSketch.params[name];
  }
  // not defined anywhere
  return null;
};
////////////////////////////////////////////////////////////////////////////
// 2D/3D methods wiring utils
////////////////////////////////////////////////////////////////////////////
// Installs the renderer (2D, 3D, or the lazy "Pre" placeholder) and
// copies its drawing methods onto the sketch object.
function wireDimensionalFunctions(mode) {
  // Pick the Drawing2D/Drawing3D/DrawingPre implementation.
  if (mode === '3D') {
    drawing = new Drawing3D();
  } else if (mode === '2D') {
    drawing = new Drawing2D();
  } else {
    drawing = new DrawingPre();
  }
  // Wire up functions, using the DrawingPre property names as the
  // canonical list; names containing "$" are renderer-internal.
  for (var name in DrawingPre.prototype) {
    if (DrawingPre.prototype.hasOwnProperty(name) && name.indexOf("$") < 0) {
      p[name] = drawing[name];
    }
  }
  // Run renderer-specific initialization.
  drawing.$init();
}
// Builds a stub that lazily wires the 2D renderer the first time a
// drawing function is called (i.e. before size() has run), then
// forwards the call to the real implementation.
function createDrawingPreFunction(name) {
  return function() {
    wireDimensionalFunctions("2D");
    return drawing[name].apply(this, arguments);
  };
}
// Every drawing API goes through a lazy-initializing stub so that a
// sketch which calls a drawing function before size() still works.
var drawingPreMethodNames = [
  "translate", "scale", "pushMatrix", "popMatrix", "resetMatrix",
  "applyMatrix", "rotate", "rotateZ", "redraw", "toImageData",
  "ambientLight", "directionalLight", "lightFalloff", "lightSpecular",
  "pointLight", "noLights", "spotLight", "beginCamera", "endCamera",
  "frustum", "box", "sphere", "ambient", "emissive", "shininess",
  "specular", "fill", "stroke", "strokeWeight", "smooth", "noSmooth",
  "point", "vertex", "endShape", "bezierVertex", "curveVertex", "curve",
  "line", "bezier", "rect", "ellipse", "background", "image",
  "textWidth", "text$line", "$ensureContext", "$newPMatrix"
];
for (var dpfIndex = 0; dpfIndex < drawingPreMethodNames.length; dpfIndex++) {
  DrawingPre.prototype[drawingPreMethodNames[dpfIndex]] =
    createDrawingPreFunction(drawingPreMethodNames[dpfIndex]);
}
// size() is the call that decides which renderer is used: wire up the
// matching implementation, then re-dispatch to the real size().
DrawingPre.prototype.size = function(aWidth, aHeight, aMode) {
  var dimensionality = (aMode === PConstants.WEBGL) ? "3D" : "2D";
  wireDimensionalFunctions(dimensionality);
  p.size(aWidth, aHeight, aMode);
};
// The placeholder renderer needs no initialization.
DrawingPre.prototype.$init = nop;
Drawing2D.prototype.$init = function() {
  // Setup default 2d canvas context.
  // Moving this here removes the number of times we need to check the 3D variable
  p.size(p.width, p.height);
  curContext.lineCap = 'round';
  // Set default stroke and fill color
  p.noSmooth();
  p.disableContextMenu();
};
Drawing3D.prototype.$init = function() {
  // For ref/perf test compatibility until those are fixed
  p.use3DContext = true;
};
// Returns the current rendering context (2D canvas or WebGL),
// shared by both renderer implementations.
DrawingShared.prototype.$ensureContext = function() {
  return curContext;
};
//////////////////////////////////////////////////////////////////////////
// Touch and Mouse event handling
//////////////////////////////////////////////////////////////////////////
// Computes the page-space offset of the canvas, accounting for element
// offsets, ancestor scrolling, padding, borders, and window scrolling.
// Also rolls the current mouse position into pmouseX/pmouseY.
function calculateOffset(curElement, event) {
  var offsetX = 0;
  var offsetY = 0;
  // Remember the previous mouse position before it is updated.
  p.pmouseX = p.mouseX;
  p.pmouseY = p.mouseY;
  // Walk the offsetParent chain to find the element's document position.
  var element = curElement;
  if (element.offsetParent) {
    do {
      offsetX += element.offsetLeft;
      offsetY += element.offsetTop;
      element = element.offsetParent;
    } while (element);
  }
  // Subtract scrolling of the element and all of its ancestors.
  element = curElement;
  do {
    offsetX -= element.scrollLeft || 0;
    offsetY -= element.scrollTop || 0;
    element = element.parentNode;
  } while (element);
  // Account for padding, border widths, and page scrolling.
  offsetX += stylePaddingLeft + styleBorderLeft + window.pageXOffset;
  offsetY += stylePaddingTop + styleBorderTop + window.pageYOffset;
  return {'X': offsetX, 'Y': offsetY};
}
// Translates an event's page coordinates into canvas-relative
// mouseX/mouseY on the sketch.
function updateMousePosition(curElement, event) {
  var offset = calculateOffset(curElement, event);
  // Dropping support for IE clientX and clientY, switching to pageX and pageY so we don't have to calculate scroll offset.
  // Removed in ticket #184. See rev: 2f106d1c7017fed92d045ba918db47d28e5c16f4
  p.mouseX = event.pageX - offset.X;
  p.mouseY = event.pageY - offset.Y;
}
// Return a TouchEvent with canvas-specific x/y co-ordinates
// Return a TouchEvent with canvas-specific x/y co-ordinates.
// The same offset applies to all three touch lists, so a single helper
// replaces the three previously copy-pasted loops.
function addTouchEventOffset(t) {
  var offset = calculateOffset(t.changedTouches[0].target, t.changedTouches[0]);
  // Annotates every touch in a TouchList with canvas-relative offsets.
  function applyOffset(touchList) {
    for (var i = 0; i < touchList.length; i++) {
      var touch = touchList[i];
      touch.offsetX = touch.pageX - offset.X;
      touch.offsetY = touch.pageY - offset.Y;
    }
  }
  applyOffset(t.touches);
  applyOffset(t.targetTouches);
  applyOffset(t.changedTouches);
  return t;
}
attachEventHandler(curElement, "touchstart", function (t) {
// Removes unwanted behaviour of the canvas when touching canvas
curElement.setAttribute("style","-webkit-user-select: none");
curElement.setAttribute("onclick","void(0)");
curElement.setAttribute("style","-webkit-tap-highlight-color:rgba(0,0,0,0)");
// Loop though eventHandlers and remove mouse listeners
for (var i=0, ehl=eventHandlers.length; i 'while"B1""A2"'
// parentheses() = B, brackets[] = C and braces{} = A
// Breaks code into bracketed "atoms": each (), [] and {} group is
// stored separately and replaced in its parent text by a quoted
// placeholder — "B<n>" for parens, "C<n>" for brackets, "A<n>" for
// braces. e.g. 'while(a){b;}' => 'while"B1""A2"' plus atoms '(a)','{b;}'.
// The fully substituted outer text is returned as element 0.
function splitToAtoms(code) {
  var atoms = [];
  var stack = [];
  var pieces = code.split(/([\{\[\(\)\]\}])/);
  var current = pieces[0];
  for (var i = 1; i < pieces.length; i += 2) {
    var bracket = pieces[i];
    switch (bracket) {
    case '{':
    case '[':
    case '(':
      // Opening bracket: shelve the text so far and start a new atom.
      stack.push(current);
      current = bracket;
      break;
    case '}':
    case ']':
    case ')':
      // Closing bracket: store the finished atom and splice its
      // placeholder into the enclosing text.
      var kind = bracket === '}' ? 'A' : (bracket === ')' ? 'B' : 'C');
      atoms.push(current + bracket);
      current = stack.pop() + '"' + kind + atoms.length + '"';
      break;
    }
    current += pieces[i + 1];
  }
  atoms.unshift(current);
  return atoms;
}
// replaces strings and regexs keyed by index with an array of strings
// Re-inserts previously extracted string/char/regex literals, which are
// keyed in the code as '<index>' into the strings array. Regex literals
// and plain strings go back verbatim; single-character literals are
// wrapped as Processing Character objects.
function injectStrings(code, strings) {
  return code.replace(/'(\d+)'/g, function(all, index) {
    var val = strings[index];
    // Regular expression literals are re-inserted untouched.
    if (val.charAt(0) === "/") {
      return val;
    }
    var isCharLiteral = (/^'((?:[^'\\\n])|(?:\\.[0-9A-Fa-f]*))'$/).test(val);
    return isCharLiteral ? "(new $p.Character(" + val + "))" : val;
  });
}
// trims off leading and trailing spaces
// returns an object. object.left, object.middle, object.right, object.untrim
// Splits a string into its leading whitespace, trimmed middle, and
// trailing whitespace. The returned object also carries untrim(),
// which re-wraps replacement text in the original padding.
function trimSpaces(string) {
  var leadMatch = /^\s*/.exec(string);
  var result;
  if (leadMatch[0].length === string.length) {
    // All-whitespace input: everything counts as "left" padding.
    result = {left: leadMatch[0], middle: "", right: ""};
  } else {
    var trailMatch = /\s*$/.exec(string);
    result = {
      left: leadMatch[0],
      middle: string.substring(leadMatch[0].length, trailMatch.index),
      right: trailMatch[0]
    };
  }
  result.untrim = function(t) {
    return this.left + t + this.right;
  };
  return result;
}
// simple trim of leading and trailing spaces
// simple trim of leading and trailing spaces
function trim(string) {
  return string.replace(/^\s+|\s+$/g, '');
}
function appendToLookupTable(table, array) {
for(var i=0,l=array.length;i([=]?)/g, replaceFunc);
} while (genericsWereRemoved);
var atoms = splitToAtoms(codeWoGenerics);
var replaceContext;
var declaredClasses = {}, currentClassId, classIdSeed = 0;
// Registers text as a new atom of the given kind and returns the quoted
// reference marker that stands in for it, e.g. '"B12"'.
function addAtom(text, type) {
  atoms.push(text);
  return '"' + type + (atoms.length - 1) + '"';
}
// Produces a unique id ("class1", "class2", ...) for each parsed class.
function generateClassId() {
  classIdSeed += 1;
  return "class" + classIdSeed;
}
// Tags a parsed class with its identity/scope and registers it in the
// declaredClasses lookup table.
function appendClass(class_, classId, scopeId) {
  declaredClasses[classId] = class_;
  class_.classId = classId;
  class_.scopeId = scopeId;
}
// functions defined below (declared up front so they can reference each other)
var transformClassBody, transformInterfaceBody, transformStatementsBlock, transformStatements, transformMain, transformExpression;
// Matches a class/interface header up to its body atom.
// Groups: 1 - attributes, 2 - "class"|"interface", 3 - name,
// 4 - extends clause, 5 - implements clause, 6 - body atom "A<n>".
var classesRegex = /\b((?:(?:public|private|final|protected|static|abstract)\s+)*)(class|interface)\s+([A-Za-z_$][\w$]*\b)(\s+extends\s+[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*(?:\s*,\s*[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*\b)*)?(\s+implements\s+[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*(?:\s*,\s*[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*\b)*)?\s*("A\d+")/g;
// Matches a method signature. Groups: 1 - attributes, 2 - return type
// (possibly with "C<n>" array suffixes), 3 - name, 4 - params atom "B<n>",
// 5 - throws clause, 6 - body atom "A<n>" or ';' (abstract).
var methodsRegex = /\b((?:(?:public|private|final|protected|static|abstract|synchronized)\s+)*)((?!(?:else|new|return|throw|function|public|private|protected)\b)[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*(?:\s*"C\d+")*)\s*([A-Za-z_$][\w$]*\b)\s*("B\d+")(\s*throws\s+[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*(?:\s*,\s*[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*)*)?\s*("A\d+"|;)/g;
// Tests whether a statement looks like a field declaration.
// Groups: 1 - attributes, 2 - type, 3 - first field name, 4 - '=', ',' or end.
var fieldTest = /^((?:(?:public|private|final|protected|static)\s+)*)((?!(?:else|new|return|throw)\b)[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*(?:\s*"C\d+")*)\s*([A-Za-z_$][\w$]*\b)\s*(?:"C\d+"\s*)*([=,]|$)/;
// Matches a constructor. Groups: 1 - attributes, 2 - name,
// 3 - params atom "B<n>", 4 - throws clause, 5 - body atom "A<n>".
var cstrsRegex = /\b((?:(?:public|private|final|protected|static|abstract)\s+)*)((?!(?:new|return|throw)\b)[A-Za-z_$][\w$]*\b)\s*("B\d+")(\s*throws\s+[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*(?:\s*,\s*[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*)*)?\s*("A\d+")/g;
// Matches the attributes+type prefix of a declaration (no /g: used with ^).
var attrAndTypeRegex = /^((?:(?:public|private|final|protected|static)\s+)*)((?!(?:new|return|throw)\b)[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*(?:\s*"C\d+")*)\s*/;
// Matches a JavaScript-style function: 1 - optional name,
// 2 - params atom "B<n>", 3 - body atom "A<n>".
var functionsRegex = /\bfunction(?:\s+([A-Za-z_$][\w$]*))?\s*("B\d+")\s*("A\d+")/g;
// This converts classes, methods and functions into atoms, and adds them to the atoms array.
// classes = E, methods = D and functions = H
// Masks every class (E), method (D) and bare function (H) declaration in
// code, registering each one as an atom and leaving its reference marker
// in place.
function extractClassesAndMethods(code) {
  return code
    .replace(classesRegex, function(all) {
      return addAtom(all, 'E');
    })
    .replace(methodsRegex, function(all) {
      return addAtom(all, 'D');
    })
    .replace(functionsRegex, function(all) {
      return addAtom(all, 'H');
    });
}
// This converts constructors into atoms, and adds them to the atoms array.
// constructors = G
// Masks constructors of the named class as G-atoms. Calls whose name does
// not match className are left untouched (they are ordinary code).
function extractConstructors(code, className) {
  return code.replace(cstrsRegex, function(all, attr, name, params, throws_, body) {
    return name === className ? addAtom(all, 'G') : all;
  });
}
// AstParam contains the name of a parameter inside a function declaration
// AstParam holds a single parameter name from a function declaration.
function AstParam(paramName) {
  this.name = paramName;
}
// Serializes back to the bare parameter name.
AstParam.prototype.toString = function() {
  return this.name;
};
// AstParams contains an array of AstParam objects
function AstParams(params) {
this.params = params;
}
AstParams.prototype.getNames = function() {
var names = [];
for(var i=0,l=this.params.length;i {...}
s = s.replace(/\bnew\s+([A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*)(?:\s*"C\d+")+\s*("A\d+")/g, function(all, type, init) {
return init;
});
// new Runnable() {...} --> "F???"
s = s.replace(/\bnew\s+([A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*)(?:\s*"B\d+")\s*("A\d+")/g, function(all, type, init) {
return addAtom(all, 'F');
});
// function(...) { } --> "H???"
s = s.replace(functionsRegex, function(all) {
return addAtom(all, 'H');
});
// new type[?] --> createJavaArray('type', [?])
s = s.replace(/\bnew\s+([A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*)\s*("C\d+"(?:\s*"C\d+")*)/g, function(all, type, index) {
var args = index.replace(/"C(\d+)"/g, function(all, j) { return atoms[j]; })
.replace(/\[\s*\]/g, "[null]").replace(/\s*\]\s*\[\s*/g, ", ");
var arrayInitializer = "{" + args.substring(1, args.length - 1) + "}";
var createArrayArgs = "('" + type + "', " + addAtom(arrayInitializer, 'A') + ")";
return '$p.createJavaArray' + addAtom(createArrayArgs, 'B');
});
// .length() --> .length
s = s.replace(/(\.\s*length)\s*"B\d+"/g, "$1");
// #000000 --> 0x000000
s = s.replace(/#([0-9A-Fa-f]{6})\b/g, function(all, digits) {
return "0xFF" + digits;
});
// delete (type)???, except (int)???
s = s.replace(/"B(\d+)"(\s*(?:[\w$']|"B))/g, function(all, index, next) {
var atom = atoms[index];
if(!/^\(\s*[A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*\s*(?:"C\d+"\s*)*\)$/.test(atom)) {
return all;
}
if(/^\(\s*int\s*\)$/.test(atom)) {
return "(int)" + next;
}
var indexParts = atom.split(/"C(\d+)"/g);
if(indexParts.length > 1) {
// even items contains atom numbers, can check only first
if(! /^\[\s*\]$/.test(atoms[indexParts[1]])) {
return all; // fallback - not a cast
}
}
return "" + next;
});
// (int)??? -> __int_cast(???)
s = s.replace(/\(int\)([^,\]\)\}\?\:\*\+\-\/\^\|\%\&\~<\>\=]+)/g, function(all, arg) {
var trimmed = trimSpaces(arg);
return trimmed.untrim("__int_cast(" + trimmed.middle + ")");
});
// super() -> $superCstr(), super. -> $super.;
s = s.replace(/\bsuper(\s*"B\d+")/g, "$$superCstr$1").replace(/\bsuper(\s*\.)/g, "$$super$1");
// 000.43->0.43 and 0010f->10, but not 0010
s = s.replace(/\b0+((\d*)(?:\.[\d*])?(?:[eE][\-\+]?\d+)?[fF]?)\b/, function(all, numberWo0, intPart) {
if( numberWo0 === intPart) {
return all;
}
return intPart === "" ? "0" + numberWo0 : numberWo0;
});
// 3.0f -> 3.0
s = s.replace(/\b(\.?\d+\.?)[fF]\b/g, "$1");
// Weird (?) parsing errors with %
s = s.replace(/([^\s])%([^=\s])/g, "$1 % $2");
// Since frameRate() and frameRate are different things,
// we need to differentiate them somehow. So when we parse
// the Processing.js source, replace frameRate so it isn't
// confused with frameRate(), as well as keyPressed and mousePressed
s = s.replace(/\b(frameRate|keyPressed|mousePressed)\b(?!\s*"B)/g, "__$1");
// "boolean", "byte", "int", etc. => "parseBoolean", "parseByte", "parseInt", etc.
s = s.replace(/\b(boolean|byte|char|float|int)\s*"B/g, function(all, name) {
return "parse" + name.substring(0, 1).toUpperCase() + name.substring(1) + "\"B";
});
// "pixels" replacements:
// pixels[i] = c => pixels.setPixel(i,c) | pixels[i] => pixels.getPixel(i)
// pixels.length => pixels.getLength()
// pixels = ar => pixels.set(ar) | pixels => pixels.toArray()
s = s.replace(/\bpixels\b\s*(("C(\d+)")|\.length)?(\s*=(?!=)([^,\]\)\}]+))?/g,
function(all, indexOrLength, index, atomIndex, equalsPart, rightSide) {
if(index) {
var atom = atoms[atomIndex];
if(equalsPart) {
return "pixels.setPixel" + addAtom("(" +atom.substring(1, atom.length - 1) +
"," + rightSide + ")", 'B');
}
return "pixels.getPixel" + addAtom("(" + atom.substring(1, atom.length - 1) +
")", 'B');
}
if(indexOrLength) {
// length
return "pixels.getLength" + addAtom("()", 'B');
}
if(equalsPart) {
return "pixels.set" + addAtom("(" + rightSide + ")", 'B');
}
return "pixels.toArray" + addAtom("()", 'B');
});
// Java method replacements for: replace, replaceAll, replaceFirst, equals, hashCode, etc.
// xxx.replace(yyy) -> __replace(xxx, yyy)
// "xx".replace(yyy) -> __replace("xx", yyy)
var repeatJavaReplacement;
function replacePrototypeMethods(all, subject, method, atomIndex) {
var atom = atoms[atomIndex];
repeatJavaReplacement = true;
var trimmed = trimSpaces(atom.substring(1, atom.length - 1));
return "__" + method + ( trimmed.middle === "" ? addAtom("(" + subject.replace(/\.\s*$/, "") + ")", 'B') :
addAtom("(" + subject.replace(/\.\s*$/, "") + "," + trimmed.middle + ")", 'B') );
}
do {
repeatJavaReplacement = false;
s = s.replace(/((?:'\d+'|\b[A-Za-z_$][\w$]*\s*(?:"[BC]\d+")*)\s*\.\s*(?:[A-Za-z_$][\w$]*\s*(?:"[BC]\d+"\s*)*\.\s*)*)(replace|replaceAll|replaceFirst|contains|equals|equalsIgnoreCase|hashCode|toCharArray|printStackTrace|split|startsWith|endsWith|codePointAt)\s*"B(\d+)"/g,
replacePrototypeMethods);
} while (repeatJavaReplacement);
// xxx instanceof yyy -> __instanceof(xxx, yyy)
function replaceInstanceof(all, subject, type) {
repeatJavaReplacement = true;
return "__instanceof" + addAtom("(" + subject + ", " + type + ")", 'B');
}
do {
repeatJavaReplacement = false;
s = s.replace(/((?:'\d+'|\b[A-Za-z_$][\w$]*\s*(?:"[BC]\d+")*)\s*(?:\.\s*[A-Za-z_$][\w$]*\s*(?:"[BC]\d+"\s*)*)*)instanceof\s+([A-Za-z_$][\w$]*\s*(?:\.\s*[A-Za-z_$][\w$]*)*)/g,
replaceInstanceof);
} while (repeatJavaReplacement);
// this() -> $constr()
s = s.replace(/\bthis(\s*"B\d+")/g, "$$constr$1");
return s;
}
// AstInlineClass represents an anonymous class ("new Interface() {...}").
// It registers itself as the owner of its body.
function AstInlineClass(baseInterfaceName, body) {
  this.baseInterfaceName = baseInterfaceName;
  body.owner = this;
  this.body = body;
}
// Emits an immediate instantiation of the generated class expression.
AstInlineClass.prototype.toString = function() {
  return "new (" + this.body + ")";
};
// Converts an anonymous "new Interface() {...}" expression into an
// AstInlineClass with a unique generated name, registered under a fresh
// class id scoped inside the current class.
function transformInlineClass(class_) {
// 1 - base interface name, 2 - body atom index
var m = new RegExp(/\bnew\s*([A-Za-z_$][\w$]*\s*(?:\.\s*[A-Za-z_$][\w$]*)*)\s*"B\d+"\s*"A(\d+)"/).exec(class_);
var oldClassId = currentClassId, newClassId = generateClassId();
currentClassId = newClassId;
var uniqueClassName = m[1] + "$" + newClassId;
var inlineClass = new AstInlineClass(uniqueClassName,
transformClassBody(atoms[m[2]], uniqueClassName, "", "implements " + m[1]));
appendClass(inlineClass, newClassId, oldClassId);
// restore the enclosing class scope
currentClassId = oldClassId;
return inlineClass;
}
// AstFunction is a JavaScript-style function (possibly anonymous)
// appearing in the sketch source.
function AstFunction(name, params, body) {
  this.name = name;
  this.params = params;
  this.body = body;
}
AstFunction.prototype.toString = function() {
  var savedContext = replaceContext;
  // "this" and all parameter names shadow outer names while the body is
  // being serialized.
  var locals = appendToLookupTable({"this": null}, this.params.getNames());
  replaceContext = function (subject) {
    if (locals.hasOwnProperty(subject.name)) {
      return subject.name;
    }
    return savedContext(subject);
  };
  var pieces = ["function"];
  if (this.name) {
    pieces.push(" " + this.name);
  }
  pieces.push(this.params + " " + this.body);
  replaceContext = savedContext;
  return pieces.join("");
};
// Parses 'name "B<params>" "A<body>"' into an AstFunction; the bare
// keyword "function" in the name slot marks an anonymous function.
function transformFunction(class_) {
  var m = /\b([A-Za-z_$][\w$]*)\s*"B(\d+)"\s*"A(\d+)"/.exec(class_);
  var name = m[1] === "function" ? null : m[1];
  return new AstFunction(name,
    transformParams(atoms[m[2]]), transformStatementsBlock(atoms[m[3]]));
}
// AstInlineObject wraps the member list of an inline (anonymous) object
// literal produced while transforming expressions.
function AstInlineObject(members) {
this.members = members;
}
AstInlineObject.prototype.toString = function() {
var oldContext = replaceContext;
replaceContext = function (subject) {
return subject.name === "this" ? "this" : oldContext(subject); // saving "this."
};
var result = "";
for(var i=0,l=this.members.length;i= 0) { // can be without var declaration
init = init.substring(0, init.indexOf("="));
}
return "(" + init + " in " + this.container + ")";
};
// AstForEachExpression is the header of a Java-style for-each loop,
// e.g. "for (int x : list)". It is emitted as a classic for(;;) header
// that drives a $p.ObjectIterator over the container.
function AstForEachExpression(initStatement, container) {
  this.initStatement = initStatement;
  this.container = container;
}
// Monotonic counter so every for-each gets a unique iterator variable.
AstForEachExpression.iteratorId = 0;
AstForEachExpression.prototype.toString = function() {
  var iterator = "$it" + (AstForEachExpression.iteratorId++);
  var declaration = this.initStatement.toString();
  // loop variable name: strip the leading "var" and any initializer
  var loopVar = declaration.replace(/^\s*var\s*/, "").split("=")[0];
  var head = "var " + iterator + " = new $p.ObjectIterator(" + this.container + "), " +
    loopVar + " = void(0)";
  // assignment is wrapped in (... || true) so falsy elements don't stop the loop
  var condition = iterator + ".hasNext() && ((" +
    loopVar + " = " + iterator + ".next()) || true)";
  return "(" + head + "; " + condition + ";)";
};
// Dispatches a for-loop header to the right AST node: for(..in..),
// Java for-each "(Type v : container)", or the classic for(init;cond;step).
function transformForExpression(expr) {
  var inner;
  if (/\bin\b/.test(expr)) {
    inner = expr.substring(1, expr.length - 1).split(/\bin\b/g);
    return new AstForInExpression(transformStatement(trim(inner[0])),
      transformExpression(inner[1]));
  }
  // a ':' without any ';' marks the Java for-each form
  if (expr.indexOf(":") >= 0 && expr.indexOf(";") < 0) {
    inner = expr.substring(1, expr.length - 1).split(":");
    return new AstForEachExpression(transformStatement(trim(inner[0])),
      transformExpression(inner[1]));
  }
  inner = expr.substring(1, expr.length - 1).split(";");
  return new AstForExpression(transformStatement(trim(inner[0])),
    transformExpression(inner[1]), transformExpression(inner[2]));
}
// Sorts in place so that heavier entries (more depended-upon classes,
// see setWeight) come first.
function sortByWeight(array) {
  array.sort(function (first, second) {
    return second.weight - first.weight;
  });
}
// AstInnerInterface is an interface declared inside another class body.
function AstInnerInterface(name, body, isStatic) {
  this.name = name;
  this.isStatic = isStatic;
  body.owner = this;
  this.body = body;
}
// Serialization defers entirely to the transformed body.
AstInnerInterface.prototype.toString = function() {
  return "" + this.body;
};
// AstInnerClass is a class declared inside another class body.
function AstInnerClass(name, body, isStatic) {
  this.name = name;
  this.isStatic = isStatic;
  body.owner = this;
  this.body = body;
}
// Serialization defers entirely to the transformed body.
AstInnerClass.prototype.toString = function() {
  return "" + this.body;
};
// Parses a class/interface declared inside another class into an
// AstInnerClass/AstInnerInterface, registered under a fresh class id
// scoped inside the current class.
function transformInnerClass(class_) {
var m = classesRegex.exec(class_); // 1 - attr, 2 - class|int, 3 - name, 4 - extends, 5 - implements, 6 - body
// classesRegex is a shared /g regex whose lastIndex persists between
// calls; rewind it so the next caller matches from the beginning.
classesRegex.lastIndex = 0;
var isStatic = m[1].indexOf("static") >= 0;
var body = atoms[getAtomIndex(m[6])], innerClass;
var oldClassId = currentClassId, newClassId = generateClassId();
currentClassId = newClassId;
if(m[2] === "interface") {
innerClass = new AstInnerInterface(m[3], transformInterfaceBody(body, m[3], m[4]), isStatic);
} else {
innerClass = new AstInnerClass(m[3], transformClassBody(body, m[3], m[4], m[5]), isStatic);
}
appendClass(innerClass, newClassId, oldClassId);
// restore the enclosing class scope
currentClassId = oldClassId;
return innerClass;
}
// AstClassMethod is one (possibly overloaded) method of a class body.
// Its methodId property ("name$arity[...]") is assigned later, by the
// class-body serializer, before toString is called.
function AstClassMethod(name, params, body, isStatic) {
  this.name = name;
  this.params = params;
  this.body = body;
  this.isStatic = isStatic;
}
AstClassMethod.prototype.toString = function(){
  var savedContext = replaceContext;
  // parameter names shadow outer names while the body is serialized
  var paramNames = appendToLookupTable({}, this.params.getNames());
  replaceContext = function (subject) {
    if (paramNames.hasOwnProperty(subject.name)) {
      return subject.name;
    }
    return savedContext(subject);
  };
  var rendered = "function " + this.methodId + this.params + " " + this.body + "\n";
  replaceContext = savedContext;
  return rendered;
};
// Parses one method declaration into an AstClassMethod.
// methodsRegex groups: 1 - attributes, 3 - name, 4 - params atom,
// 6 - body atom, or ';' for an abstract method (given an empty body).
function transformClassMethod(method) {
var m = methodsRegex.exec(method);
// methodsRegex is a shared /g regex; reset lastIndex for later callers
methodsRegex.lastIndex = 0;
var isStatic = m[1].indexOf("static") >= 0;
var body = m[6] !== ';' ? atoms[getAtomIndex(m[6])] : "{}";
return new AstClassMethod(m[3], transformParams(atoms[getAtomIndex(m[4])]),
transformStatementsBlock(body), isStatic );
}
// AstClassField groups the comma-separated definitions of a single field
// declaration (e.g. "int a = 1, b"). fieldType is the declared type text.
function AstClassField(definitions, fieldType, isStatic) {
this.definitions = definitions;
this.fieldType = fieldType;
this.isStatic = isStatic;
}
AstClassField.prototype.getNames = function() {
var names = [];
for(var i=0,l=this.definitions.length;i= 0;
var definitions = statement.substring(attrAndType[0].length).split(/,\s*/g);
var defaultTypeValue = getDefaultValueForType(attrAndType[2]);
for(var i=0; i < definitions.length; ++i) {
definitions[i] = transformVarDefinition(definitions[i], defaultTypeValue);
}
return new AstClassField(definitions, attrAndType[2], isStatic);
}
// AstConstructor is one constructor overload of a class body.
function AstConstructor(params, body) {
this.params = params;
this.body = body;
}
// Emits the overload as "function $constr_<arity>(...) {...}"; the class
// body's generated $constr dispatches to it by arguments.length.
AstConstructor.prototype.toString = function() {
var paramNames = appendToLookupTable({}, this.params.getNames());
var oldContext = replaceContext;
// parameter names shadow outer names while the body is serialized
replaceContext = function (subject) {
return paramNames.hasOwnProperty(subject.name) ? subject.name : oldContext(subject);
};
var prefix = "function $constr_" + this.params.params.length + this.params.toString();
var body = this.body.toString();
// Java implicitly calls super() first unless the constructor already
// chains to super(...) or this(...); mirror that by injecting $superCstr().
if(!/\$(superCstr|constr)\b/.test(body)) {
body = "{\n$superCstr();\n" + body.substring(1);
}
replaceContext = oldContext;
return prefix + body + "\n";
};
// Parses a constructor atom pair '"B<params>" "A<body>"' into an
// AstConstructor.
function transformConstructor(cstr) {
  var m = /"B(\d+)"\s*"A(\d+)"/.exec(cstr);
  return new AstConstructor(transformParams(atoms[m[1]]),
    transformStatementsBlock(atoms[m[2]]));
}
function AstInterfaceBody(name, interfacesNames, methodsNames, fields, innerClasses, misc) {
var i,l;
this.name = name;
this.interfacesNames = interfacesNames;
this.methodsNames = methodsNames;
this.fields = fields;
this.innerClasses = innerClasses;
this.misc = misc;
for(i=0,l=fields.length; i 0) {
result += this.functions.join('\n') + '\n';
}
sortByWeight(this.innerClasses);
for (i = 0, l = this.innerClasses.length; i < l; ++i) {
var innerClass = this.innerClasses[i];
if (innerClass.isStatic) {
staticDefinitions += className + "." + innerClass.name + " = " + innerClass + ";\n";
result += selfId + "." + innerClass.name + " = " + className + "." + innerClass.name + ";\n";
} else {
result += selfId + "." + innerClass.name + " = " + innerClass + ";\n";
}
}
for (i = 0, l = this.fields.length; i < l; ++i) {
var field = this.fields[i];
if (field.isStatic) {
staticDefinitions += className + "." + field.definitions.join(";\n" + className + ".") + ";\n";
for (j = 0, m = field.definitions.length; j < m; ++j) {
var fieldName = field.definitions[j].name, staticName = className + "." + fieldName;
result += "$p.defineProperty(" + selfId + ", '" + fieldName + "', {" +
"get: function(){return " + staticName + "}, " +
"set: function(val){" + staticName + " = val}});\n";
}
} else {
result += selfId + "." + field.definitions.join(";\n" + selfId + ".") + ";\n";
}
}
var methodOverloads = {};
for (i = 0, l = this.methods.length; i < l; ++i) {
var method = this.methods[i];
var overload = methodOverloads[method.name];
var methodId = method.name + "$" + method.params.params.length;
if (overload) {
++overload;
methodId += "_" + overload;
} else {
overload = 1;
}
method.methodId = methodId;
methodOverloads[method.name] = overload;
if (method.isStatic) {
staticDefinitions += method;
staticDefinitions += "$p.addMethod(" + className + ", '" + method.name + "', " + methodId + ");\n";
result += "$p.addMethod(" + selfId + ", '" + method.name + "', " + methodId + ");\n";
} else {
result += method;
result += "$p.addMethod(" + selfId + ", '" + method.name + "', " + methodId + ");\n";
}
}
result += trim(this.misc.tail);
if (this.cstrs.length > 0) {
result += this.cstrs.join('\n') + '\n';
}
result += "function $constr() {\n";
var cstrsIfs = [];
for (i = 0, l = this.cstrs.length; i < l; ++i) {
var paramsLength = this.cstrs[i].params.params.length;
cstrsIfs.push("if(arguments.length === " + paramsLength + ") { " +
"$constr_" + paramsLength + ".apply(" + selfId + ", arguments); }");
}
if(cstrsIfs.length > 0) {
result += cstrsIfs.join(" else ") + " else ";
}
// ??? add check if length is 0, otherwise fail
result += "$superCstr();\n}\n";
result += "$constr.apply(null, arguments);\n";
replaceContext = oldContext;
return "(function() {\n" +
"function " + className + "() {\n" + result + "}\n" +
staticDefinitions +
metadata +
"return " + className + ";\n" +
"})()";
};
// Parses the contents of a class body atom into an AstClassBody: masks
// methods, inner classes, constructors and functions as atoms first, then
// treats the ';'-separated remainder as field declarations.
transformClassBody = function(body, name, baseName, interfaces) {
var declarations = body.substring(1, body.length - 1);
declarations = extractClassesAndMethods(declarations);
declarations = extractConstructors(declarations, name);
var methods = [], classes = [], cstrs = [], functions = [];
// collect atom indices by kind: D=method, E=class, H=function, G=constructor
declarations = declarations.replace(/"([DEGH])(\d+)"/g, function(all, type, index) {
if(type === 'D') { methods.push(index); }
else if(type === 'E') { classes.push(index); }
else if(type === 'H') { functions.push(index); }
else { cstrs.push(index); }
return "";
});
// what remains are the field declarations, separated by semicolons
var fields = declarations.replace(/^(?:\s*;)+/, "").split(/;(?:\s*;)*/g);
var baseClassName, interfacesNames;
var i;
if(baseName !== undef) {
baseClassName = baseName.replace(/^\s*extends\s+([A-Za-z_$][\w$]*\b(?:\s*\.\s*[A-Za-z_$][\w$]*\b)*)\s*$/g, "$1");
}
if(interfaces !== undef) {
interfacesNames = interfaces.replace(/^\s*implements\s+(.+?)\s*$/g, "$1").split(/\s*,\s*/g);
}
for(i = 0; i < functions.length; ++i) {
functions[i] = transformFunction(atoms[functions[i]]);
}
for(i = 0; i < methods.length; ++i) {
methods[i] = transformClassMethod(atoms[methods[i]]);
}
// the last split element is trailing text after the final ';', not a field
for(i = 0; i < fields.length - 1; ++i) {
var field = trimSpaces(fields[i]);
fields[i] = transformClassField(field.middle);
}
var tail = fields.pop();
for(i = 0; i < cstrs.length; ++i) {
cstrs[i] = transformConstructor(atoms[cstrs[i]]);
}
for(i = 0; i < classes.length; ++i) {
classes[i] = transformInnerClass(atoms[classes[i]]);
}
return new AstClassBody(name, baseClassName, interfacesNames, functions, methods, fields, cstrs,
classes, { tail: tail });
};
// AstInterface is a top-level interface declaration; it is emitted as a
// local variable that is also exported onto the sketch object ($p).
function AstInterface(name, body) {
  this.name = name;
  body.owner = this;
  this.body = body;
}
AstInterface.prototype.toString = function() {
  var name = this.name;
  return "var " + name + " = " + this.body + ";\n" +
    "$p." + name + " = " + name + ";\n";
};
// AstClass is a top-level class declaration; it is emitted as a local
// variable that is also exported onto the sketch object ($p).
function AstClass(name, body) {
  this.name = name;
  body.owner = this;
  this.body = body;
}
AstClass.prototype.toString = function() {
  var name = this.name;
  return "var " + name + " = " + this.body + ";\n" +
    "$p." + name + " = " + name + ";\n";
};
// Parses a top-level class/interface declaration into an AstClass or
// AstInterface and registers it under a fresh class id.
function transformGlobalClass(class_) {
var m = classesRegex.exec(class_); // 1 - attr, 2 - class|int, 3 - name, 4 - extends, 5 - implements, 6 - body
// shared /g regex: rewind lastIndex so the next exec starts at 0
classesRegex.lastIndex = 0;
var body = atoms[getAtomIndex(m[6])];
var oldClassId = currentClassId, newClassId = generateClassId();
currentClassId = newClassId;
var globalClass;
if(m[2] === "interface") {
globalClass = new AstInterface(m[3], transformInterfaceBody(body, m[3], m[4]) );
} else {
globalClass = new AstClass(m[3], transformClassBody(body, m[3], m[4], m[5]) );
}
appendClass(globalClass, newClassId, oldClassId);
// restore the enclosing (global) scope
currentClassId = oldClassId;
return globalClass;
}
// AstMethod is a global (sketch-level) method; it is emitted as a plain
// function that is also exported onto the sketch object ($p).
function AstMethod(name, params, body) {
  this.name = name;
  this.params = params;
  this.body = body;
}
AstMethod.prototype.toString = function(){
  var savedContext = replaceContext;
  // parameter names shadow outer names while the body is serialized
  var paramNames = appendToLookupTable({}, this.params.getNames());
  replaceContext = function (subject) {
    if (paramNames.hasOwnProperty(subject.name)) {
      return subject.name;
    }
    return savedContext(subject);
  };
  var rendered = "function " + this.name + this.params + " " + this.body + "\n" +
    "$p." + this.name + " = " + this.name + ";";
  replaceContext = savedContext;
  return rendered;
};
// Parses a global (sketch-level) method declaration into an AstMethod.
// methodsRegex groups: 3 - name, 4 - params atom, 6 - body atom.
// Fix: the original chained a dead "var result =" onto the lastIndex
// reset; result was never read, so the assignment is removed.
function transformGlobalMethod(method) {
  var m = methodsRegex.exec(method);
  // methodsRegex is a shared /g regex; rewind it for the next caller
  methodsRegex.lastIndex = 0;
  return new AstMethod(m[3], transformParams(atoms[getAtomIndex(m[4])]),
    transformStatementsBlock(atoms[getAtomIndex(m[6])]));
}
// Collapses consecutive catch blocks into the first one only: the
// generated JavaScript has no way to dispatch on exception types, so
// extra catch clauses would be dead code anyway.
function preStatementsTransform(statements) {
  return statements.replace(/\b(catch\s*"B\d+"\s*"A\d+")(\s*catch\s*"B\d+"\s*"A\d+")+/g, "$1");
}
// AstForStatement is a for-loop header plus the raw text that preceded it.
function AstForStatement(argument, misc) {
  this.argument = argument;
  this.misc = misc;
}
AstForStatement.prototype.toString = function() {
  var prefix = this.misc.prefix;
  return prefix + this.argument.toString();
};
// AstCatchStatement is a catch-clause header plus the raw text that
// preceded it.
function AstCatchStatement(argument, misc) {
  this.argument = argument;
  this.misc = misc;
}
AstCatchStatement.prototype.toString = function() {
  var prefix = this.misc.prefix;
  return prefix + this.argument.toString();
};
// AstPrefixStatement is a keyword statement (if/while/return/throw/...)
// with an optional argument expression; keywords like "else" or "do"
// carry no argument at all.
function AstPrefixStatement(name, argument, misc) {
  this.name = name;
  this.argument = argument;
  this.misc = misc;
}
AstPrefixStatement.prototype.toString = function() {
  var out = this.misc.prefix;
  if (this.argument !== undef) {
    out += this.argument.toString();
  }
  return out;
};
// AstSwitchCase is a single "case <expr>:" label in a switch statement.
function AstSwitchCase(expr) {
  this.expr = expr;
}
AstSwitchCase.prototype.toString = function() {
  return "case " + this.expr + ":";
};
// AstLabel is a statement label (e.g. "outer:"), stored verbatim.
function AstLabel(label) {
  this.label = label;
}
AstLabel.prototype.toString = function() {
  return this.label;
};
// Scans a statement soup and splits it into an array of AST nodes and
// raw string fragments. transformMethod/transformClass are injected so
// the same scanner can serve both class bodies and the global scope.
transformStatements = function(statements, transformMethod, transformClass) {
// Alternatives: 1/2 - keyword with "B" argument, 3 - bare keyword,
// 4/5 - block/class/method/function atom, 6/7 - case label,
// 8 - statement label, 9 - semicolon.
var nextStatement = new RegExp(/\b(catch|for|if|switch|while|with)\s*"B(\d+)"|\b(do|else|finally|return|throw|try|break|continue)\b|("[ADEH](\d+)")|\b(case)\s+([^:]+):|\b([A-Za-z_$][\w$]*\s*:)|(;)/g);
var res = [];
statements = preStatementsTransform(statements);
var lastIndex = 0, m, space;
// m contains the matches from the nextStatement regexp, null if there are no matches.
// nextStatement.exec starts searching at nextStatement.lastIndex.
while((m = nextStatement.exec(statements)) !== null) {
if(m[1] !== undef) { // catch, for ...
// back up to the start of the "B..." atom to capture the keyword prefix
var i = statements.lastIndexOf('"B', nextStatement.lastIndex);
var statementsPrefix = statements.substring(lastIndex, i);
if(m[1] === "for") {
res.push(new AstForStatement(transformForExpression(atoms[m[2]]),
{ prefix: statementsPrefix }) );
} else if(m[1] === "catch") {
res.push(new AstCatchStatement(transformParams(atoms[m[2]]),
{ prefix: statementsPrefix }) );
} else {
res.push(new AstPrefixStatement(m[1], transformExpression(atoms[m[2]]),
{ prefix: statementsPrefix }) );
}
} else if(m[3] !== undef) { // do, else, ...
res.push(new AstPrefixStatement(m[3], undef,
{ prefix: statements.substring(lastIndex, nextStatement.lastIndex) }) );
} else if(m[4] !== undef) { // block, class and methods
space = statements.substring(lastIndex, nextStatement.lastIndex - m[4].length);
if(trim(space).length !== 0) { continue; } // avoiding new type[] {} construct
res.push(space);
var kind = m[4].charAt(1), atomIndex = m[5];
if(kind === 'D') {
res.push(transformMethod(atoms[atomIndex]));
} else if(kind === 'E') {
res.push(transformClass(atoms[atomIndex]));
} else if(kind === 'H') {
res.push(transformFunction(atoms[atomIndex]));
} else {
res.push(transformStatementsBlock(atoms[atomIndex]));
}
} else if(m[6] !== undef) { // switch case
res.push(new AstSwitchCase(transformExpression(trim(m[7]))));
} else if(m[8] !== undef) { // label
space = statements.substring(lastIndex, nextStatement.lastIndex - m[8].length);
if(trim(space).length !== 0) { continue; } // avoiding ?: construct
res.push(new AstLabel(statements.substring(lastIndex, nextStatement.lastIndex)) );
} else { // semicolon
var statement = trimSpaces(statements.substring(lastIndex, nextStatement.lastIndex - 1));
res.push(statement.left);
res.push(transformStatement(statement.middle));
res.push(statement.right + ";");
}
lastIndex = nextStatement.lastIndex;
}
// whatever trails the final match is one last (unterminated) statement
var statementsTail = trimSpaces(statements.substring(lastIndex));
res.push(statementsTail.left);
if(statementsTail.middle !== "") {
res.push(transformStatement(statementsTail.middle));
res.push(";" + statementsTail.right);
}
return res;
};
function getLocalNames(statements) {
var localNames = [];
for(var i=0,l=statements.length;i 0) {
for (i = 0, l = interfacesNames.length; i < l; ++i) {
var interface_ = findInScopes(class_, interfacesNames[i]);
interfaces.push(interface_);
if (!interface_) {
continue;
}
if (!interface_.derived) {
interface_.derived = [];
}
interface_.derived.push(class_);
}
if (interfaces.length > 0) {
class_.interfaces = interfaces;
}
}
}
}
}
// Assigns a "weight" to every entry of the surrounding declaredClasses
// table so that sortByWeight can emit definitions in dependency order:
// classes nothing depends on start at weight 0, and a class gains a
// higher weight once all classes that depend on it (scoped inner classes
// and derived classes/interfaces) have been weighted.
// NOTE(review): the ast parameter is not read here — weights are written
// directly onto the declaredClasses entries.
function setWeight(ast) {
var queue = [], tocheck = {};
var id, scopeId, class_;
// queue most inner and non-inherited
for (id in declaredClasses) {
if (declaredClasses.hasOwnProperty(id)) {
class_ = declaredClasses[id];
if (!class_.inScope && !class_.derived) {
queue.push(id);
class_.weight = 0;
} else {
// record the classes that must be weighted before this one
var dependsOn = [];
if (class_.inScope) {
for (scopeId in class_.inScope) {
if (class_.inScope.hasOwnProperty(scopeId)) {
dependsOn.push(class_.inScope[scopeId]);
}
}
}
if (class_.derived) {
dependsOn = dependsOn.concat(class_.derived);
}
tocheck[id] = dependsOn;
}
}
}
// Removes 'from' from targetId's pending dependents; returns true only
// when no dependents remain, i.e. targetId is ready to be weighted.
function removeDependentAndCheck(targetId, from) {
var dependsOn = tocheck[targetId];
if (!dependsOn) {
return false; // no need to process
}
var i = dependsOn.indexOf(from);
if (i < 0) {
return false;
}
dependsOn.splice(i, 1);
if (dependsOn.length > 0) {
return false;
}
delete tocheck[targetId];
return true;
}
// breadth-first pass: each ready class pushes its enclosing scope, base
// class and interfaces with weight one higher than its own
while (queue.length > 0) {
id = queue.shift();
class_ = declaredClasses[id];
if (class_.scopeId && removeDependentAndCheck(class_.scopeId, class_)) {
queue.push(class_.scopeId);
declaredClasses[class_.scopeId].weight = class_.weight + 1;
}
if (class_.base && removeDependentAndCheck(class_.base.classId, class_)) {
queue.push(class_.base.classId);
class_.base.weight = class_.weight + 1;
}
if (class_.interfaces) {
var i, l;
for (i = 0, l = class_.interfaces.length; i < l; ++i) {
if (!class_.interfaces[i] ||
!removeDependentAndCheck(class_.interfaces[i].classId, class_)) {
continue;
}
queue.push(class_.interfaces[i].classId);
class_.interfaces[i].weight = class_.weight + 1;
}
}
}
}
var transformed = transformMain();
generateMetadata(transformed);
setWeight(transformed);
var redendered = transformed.toString();
// remove empty extra lines with space
redendered = redendered.replace(/\s*\n(?:[\t ]*\n)+/g, "\n\n");
return injectStrings(redendered, strings);
}// Parser ends
// Scans the source for an optional /* @pjs ... */ directive comment and
// copies its settings onto the sketch (options, params, image and font
// preloads). The source text itself is returned unmodified.
function preprocessCode(aCode, sketch) {
  var directiveMatch = /\/\*\s*@pjs\s+((?:[^\*]|\*+[^\*\/])*)\*\//g.exec(aCode);
  if (!directiveMatch || directiveMatch.length !== 2) {
    return aCode;
  }
  // Mask {...} JSON bodies with numbered placeholders so that ';' and '='
  // inside them do not confuse the directive split below.
  var jsonItems = [];
  var masked = directiveMatch[1].replace(/\{([\s\S]*?)\}/g, function(all, item) {
    jsonItems.push(item);
    return "{" + (jsonItems.length - 1) + "}";
  });
  // NOTE(review): string-argument replace drops only the FIRST newline of
  // each kind — kept as-is to preserve the original behavior.
  var directives = masked.replace('\n', '').replace('\r', '').split(";");
  // L/R-trim and strip one optional surrounding quote from each side.
  function clean(s) {
    return s.replace(/^\s*["']?/, '').replace(/["']?\s*$/, '');
  }
  for (var i = 0, dl = directives.length; i < dl; i++) {
    var pair = directives[i].split('=');
    if (!pair || pair.length !== 2) {
      continue;
    }
    var key = clean(pair[0]);
    var value = clean(pair[1]);
    var list;
    if (key === "preload") {
      // queue every listed image in the sketch's cache, keyed on filename
      list = value.split(',');
      for (var j = 0, jl = list.length; j < jl; j++) {
        sketch.imageCache.add(clean(list[j]));
      }
    } else if (key === "font") {
      // each entry is either a url string or a masked JSON font descriptor
      list = value.split(",");
      for (var x = 0, xl = list.length; x < xl; x++) {
        var fontName = clean(list[x]);
        var index = /^\{(\d*?)\}$/.exec(fontName);
        // if index is not null, send JSON, otherwise, send string
        PFont.preloading.add(index ? JSON.parse("{" + jsonItems[index[1]] + "}") : fontName);
      }
    } else if (key === "pauseOnBlur") {
      sketch.options.pauseOnBlur = value === "true";
    } else if (key === "globalKeyEvents") {
      sketch.options.globalKeyEvents = value === "true";
    } else if (key.substring(0, 6) === "param-") {
      sketch.params[key.substring(6)] = value;
    } else {
      sketch.options[key] = value;
    }
  }
  return aCode;
}
// Parse/compiles Processing (Java-like) syntax to JavaScript syntax
// Compiles Processing (Java-like) source into a Sketch whose sourceCode
// property holds the generated JavaScript.
Processing.compile = function(pdeCode) {
  var sketch = new Processing.Sketch();
  var preprocessed = preprocessCode(pdeCode, sketch);
  sketch.sourceCode = parseProcessing(preprocessed);
  return sketch;
};
//#endif
// tinylog lite JavaScript library
// http://purl.eligrey.com/tinylog/lite
/*global tinylog,print*/
// tinylog lite: a minimal logging facility exposing tinylogLite.log(msg).
// Picks the best available backend at load time: a pre-existing tinylog,
// an on-page DOM console (built lazily on first log call), or the JS
// shell's print(); otherwise log stays undefined.
var tinylogLite = (function() {
"use strict";
var tinylogLite = {},
undef = "undefined",
func = "function",
False = !1,
True = !0,
// maximum number of entries kept in the DOM console before the oldest is dropped
logLimit = 512,
log = "log";
if (typeof tinylog !== undef && typeof tinylog[log] === func) {
// pre-existing tinylog present
tinylogLite[log] = tinylog[log];
} else if (typeof document !== undef && !document.fake) {
(function() {
// DOM document
var doc = document,
$div = "div",
$style = "style",
$title = "title",
// fixed panel pinned to the bottom of the viewport
containerStyles = {
zIndex: 10000,
position: "fixed",
bottom: "0px",
width: "100%",
height: "15%",
fontFamily: "sans-serif",
color: "#ccc",
backgroundColor: "black",
paddingBottom: "5px"
},
outputStyles = {
position: "relative",
fontFamily: "monospace",
overflow: "auto",
height: "100%",
paddingTop: "5px"
},
// thin draggable strip above the output used to resize the panel
resizerStyles = {
height: "5px",
marginTop: "-5px",
cursor: "n-resize",
backgroundColor: "darkgrey"
},
closeButtonStyles = {
position: "absolute",
top: "5px",
right: "20px",
color: "#111",
MozBorderRadius: "4px",
webkitBorderRadius: "4px",
borderRadius: "4px",
cursor: "pointer",
fontWeight: "normal",
textAlign: "center",
padding: "3px 5px",
backgroundColor: "#333",
fontSize: "12px"
},
entryStyles = {
//borderBottom: "1px solid #d3d3d3",
minHeight: "16px"
},
entryTextStyles = {
fontSize: "12px",
margin: "0 8px 0 8px",
maxWidth: "100%",
whiteSpace: "pre-wrap",
overflow: "auto"
},
view = doc.defaultView,
docElem = doc.body || doc.documentElement,
docElemStyle = docElem[$style],
// setStyles(elem1, styles1, elem2, styles2, ...): applies each styles
// object to the element that precedes it in the argument list. The
// arguments are walked in reverse, two at a time (note the i-- pair).
setStyles = function() {
var i = arguments.length,
elemStyle, styles, style;
while (i--) {
styles = arguments[i--];
elemStyle = arguments[i][$style];
for (style in styles) {
if (styles.hasOwnProperty(style)) {
elemStyle[style] = styles[style];
}
}
}
},
// Attach an event handler (W3C addEventListener or legacy IE
// attachEvent); returns the [obj, event, handler] triple so the
// same arguments can be replayed into unobserve() later.
observer = function(obj, event, handler) {
if (obj.addEventListener) {
obj.addEventListener(event, handler, False);
} else if (obj.attachEvent) {
obj.attachEvent("on" + event, handler);
}
return [obj, event, handler];
},
// Detach a handler previously attached via observer().
unobserve = function(obj, event, handler) {
if (obj.removeEventListener) {
obj.removeEventListener(event, handler, False);
} else if (obj.detachEvent) {
obj.detachEvent("on" + event, handler);
}
},
// Remove every child of the given node.
clearChildren = function(node) {
var children = node.childNodes,
child = children.length;
while (child--) {
node.removeChild(children.item(0));
}
},
// small DOM helpers
append = function(to, elem) {
return to.appendChild(elem);
},
createElement = function(localName) {
return doc.createElement(localName);
},
createTextNode = function(text) {
return doc.createTextNode(text);
},
// First call builds the on-page console (container, resizer, output
// area, close button), wires its observers, then swaps tinylogLite.log
// for the fast append-only version below. Closing the console restores
// this builder as the log function so a later call rebuilds it.
createLog = tinylogLite[log] = function(message) {
// don't show output log until called once
var uninit,
originalPadding = docElemStyle.paddingBottom,
container = createElement($div),
containerStyle = container[$style],
resizer = append(container, createElement($div)),
output = append(container, createElement($div)),
closeButton = append(container, createElement($div)),
resizingLog = False,
previousHeight = False,
previousScrollTop = False,
messages = 0,
updateSafetyMargin = function() {
// have a blank space large enough to fit the output box at the page bottom
docElemStyle.paddingBottom = container.clientHeight + "px";
},
setContainerHeight = function(height) {
var viewHeight = view.innerHeight,
resizerHeight = resizer.clientHeight;
// constrain the container inside the viewport's dimensions
if (height < 0) {
height = 0;
} else if (height + resizerHeight > viewHeight) {
height = viewHeight - resizerHeight;
}
containerStyle.height = height / viewHeight * 100 + "%";
updateSafetyMargin();
},
// drag-to-resize, double-click-to-minimize and close behaviors;
// the returned triples are kept so uninit() can detach them all
observers = [
observer(doc, "mousemove", function(evt) {
if (resizingLog) {
setContainerHeight(view.innerHeight - evt.clientY);
output.scrollTop = previousScrollTop;
}
}),
observer(doc, "mouseup", function() {
if (resizingLog) {
resizingLog = previousScrollTop = False;
}
}),
observer(resizer, "dblclick", function(evt) {
evt.preventDefault();
if (previousHeight) {
setContainerHeight(previousHeight);
previousHeight = False;
} else {
previousHeight = container.clientHeight;
containerStyle.height = "0px";
}
}),
observer(resizer, "mousedown", function(evt) {
evt.preventDefault();
resizingLog = True;
previousScrollTop = output.scrollTop;
}),
observer(resizer, "contextmenu", function() {
resizingLog = False;
}),
observer(closeButton, "click", function() {
uninit();
})
];
// Tear the console down completely and restore the lazy builder.
uninit = function() {
// remove observers
var i = observers.length;
while (i--) {
unobserve.apply(tinylogLite, observers[i]);
}
// remove tinylog lite from the DOM
docElem.removeChild(container);
docElemStyle.paddingBottom = originalPadding;
clearChildren(output);
clearChildren(container);
tinylogLite[log] = createLog;
};
setStyles(
container, containerStyles, output, outputStyles, resizer, resizerStyles, closeButton, closeButtonStyles);
closeButton[$title] = "Close Log";
append(closeButton, createTextNode("\u2716"));
resizer[$title] = "Double-click to toggle log minimization";
docElem.insertBefore(container, docElem.firstChild);
// Fast path used after construction: append one entry per call,
// evicting the oldest entry once logLimit is reached.
tinylogLite[log] = function(message) {
if (messages === logLimit) {
output.removeChild(output.firstChild);
} else {
messages++;
}
var entry = append(output, createElement($div)),
entryText = append(entry, createElement($div));
entry[$title] = (new Date()).toLocaleTimeString();
setStyles(
entry, entryStyles, entryText, entryTextStyles);
append(entryText, createTextNode(message));
output.scrollTop = output.scrollHeight;
};
// Empty the console and reset the eviction counter.
tinylogLite.clear = function() {
messages = 0;
clearChildren(output);
};
// log the message that triggered construction, then reserve page space
tinylogLite[log](message);
updateSafetyMargin();
};
}());
} else if (typeof print === func) { // JS shell
tinylogLite[log] = print;
}
return tinylogLite;
}());
// end of tinylog lite JavaScript library
// Default logger used by the library; backed by tinylog lite.
Processing.logger = tinylogLite;
// Placeholder substituted with the real version string by the build system.
Processing.version = "@VERSION@";
// Shared registry where external Processing.js libraries attach themselves
// (populated via Processing.registerLibrary).
Processing.lib = {};
// Register an external library descriptor under `name`; when the
// descriptor carries its own init() hook, invoke it right away with
// the default scope so the library can install itself.
Processing.registerLibrary = function(name, desc) {
  Processing.lib[name] = desc;
  var wantsInit = desc.hasOwnProperty("init");
  if (wantsInit) {
    desc.init(defaultScope);
  }
};
// Store Processing instances. Only Processing.instances,
// Processing.getInstanceById are exposed.
Processing.instances = processingInstances;
// Look up a running Processing instance by name: `name` is resolved to an
// index through processingInstanceIds, then into processingInstances.
// Returns undefined when no instance is registered under that name.
Processing.getInstanceById = function(name) {
return processingInstances[processingInstanceIds[name]];
};
/**
 * A Sketch bundles everything needed to run one Processing sketch:
 * its compiled source (or a pre-built attach function), runtime
 * options, lifecycle hooks, URL parameters and an image preload cache.
 * @param {Function} [attachFunction] optional function that wires the
 *        sketch into a Processing instance; when absent, sourceCode
 *        must be set before attach() is called.
 */
Processing.Sketch = function(attachFunction) {
  this.attachFunction = attachFunction; // can be optional
  this.options = {
    pauseOnBlur: false,
    globalKeyEvents: false
  };
  /* Optional Sketch event hooks:
   * onLoad - parsing/preloading is done, before sketch starts
   * onSetup - setup() has been called, before first draw()
   * onPause - noLoop() has been called, pausing draw loop
   * onLoop - loop() has been called, resuming draw loop
   * onFrameStart - draw() loop about to begin
   * onFrameEnd - draw() loop finished
   * onExit - exit() done being called
   */
  this.onLoad = nop;
  this.onSetup = nop;
  this.onPause = nop;
  this.onLoop = nop;
  this.onFrameStart = nop;
  this.onFrameEnd = nop;
  this.onExit = nop;
  this.params = {};
  this.imageCache = {
    pending: 0,   // number of images still loading in the background
    images: {},   // href -> Image element (or null when no DOM is present)
    // Opera requires special administration for preloading
    operaCache: {},
    // Specify an optional img arg if the image is already loaded in the DOM,
    // otherwise href will get loaded.
    add: function(href, img) {
      // Prevent multiple loads for an image, in case it gets
      // preloaded more than once, or is added via JS and then preloaded.
      if (this.images[href]) {
        return;
      }
      if (!isDOMPresent) {
        this.images[href] = null;
      }
      // No image in the DOM, kick-off a background load
      if (!img) {
        img = new Image();
        img.onload = (function(owner) {
          return function() {
            owner.pending--;
          };
        }(this));
        this.pending++;
        img.src = href;
      }
      this.images[href] = img;
      // Opera will not load images until they are inserted into the DOM.
      if (window.opera) {
        var div = document.createElement("div");
        div.appendChild(img);
        // we can't use "display: none", since that makes it invisible, and thus not load
        div.style.position = "absolute";
        div.style.opacity = 0;
        div.style.width = "1px";
        div.style.height = "1px";
        if (!this.operaCache[href]) {
          document.body.appendChild(div);
          this.operaCache[href] = div;
        }
      }
    }
  };
  this.sourceCode = undefined;
  // Bind this sketch to a Processing instance, preferring the supplied
  // attach function and otherwise compiling the stored source (the
  // compiled function is cached back onto attachFunction for reuse).
  this.attach = function(processing) {
    // either attachFunction or sourceCode must be present on attach
    if(typeof this.attachFunction === "function") {
      this.attachFunction(processing);
    } else if(this.sourceCode) {
      var func = ((new Function("return (" + this.sourceCode + ");"))());
      func(processing);
      this.attachFunction = func;
    } else {
      throw "Unable to attach sketch to the processing instance";
    }
  };
  //#if PARSER
  // Serialize this sketch as a JavaScript expression that rebuilds an
  // equivalent Sketch: source code, option overrides and cached image hrefs.
  this.toString = function() {
    var i;
    var code = "((function(Sketch) {\n";
    code += "var sketch = new Sketch(\n" + this.sourceCode + ");\n";
    for(i in this.options) {
      if(this.options.hasOwnProperty(i)) {
        var value = this.options[i];
        code += "sketch.options." + i + " = " +
          (typeof value === 'string' ? '\"' + value + '\"' : "" + value) + ";\n";
      }
    }
    // BUG FIX: walk the cached image hrefs (imageCache.images) with the
    // matching hasOwnProperty guard. The previous code iterated
    // this.imageCache itself while guarding with this.options.hasOwnProperty,
    // so no preloaded image was ever serialized.
    for(i in this.imageCache.images) {
      if(this.imageCache.images.hasOwnProperty(i)) {
        code += "sketch.imageCache.add(\"" + i + "\");\n";
      }
    }
    // TODO serialize fonts
    code += "return sketch;\n})(Processing.Sketch))";
    return code;
  };
  //#endif
};
//#if PARSER
/**
* aggregate all source code into a single file, then rewrite that
* source and bind to canvas via new Processing(canvas, sourcestring).
* @param {CANVAS} canvas The html canvas element to bind to
* @param {String[]} sources The array of files that must be loaded
*/
// Fetch every source file (or inline <script> referenced as "#id"),
// preserve their order, and once all have arrived bind the concatenated
// code to the canvas via new Processing(canvas, code).
var loadSketchFromSources = function(canvas, sources) {
  var fetchedCode = [],
      failures = [],
      totalSources = sources.length,
      finishedCount = 0;

  // Asynchronous GET; invokes callback(responseText, error) on completion.
  function ajaxAsync(url, callback) {
    var xhr = new XMLHttpRequest();
    xhr.onreadystatechange = function() {
      if (xhr.readyState !== 4) {
        return;
      }
      var error;
      if (xhr.status !== 200 && xhr.status !== 0) {
        error = "Invalid XHR status " + xhr.status;
      } else if (xhr.responseText === "") {
        // Give a hint when loading fails due to same-origin issues on file:/// urls
        var probe = new XMLHttpRequest();
        if (("withCredentials" in probe) &&
            probe.withCredentials === false &&
            window.location.protocol === "file:") {
          error = "XMLHttpRequest failure, possibly due to a same-origin policy violation. You can try loading this page in another browser, or load it from http://localhost using a local webserver. See the Processing.js README for a more detailed explanation of this problem and solutions.";
        } else {
          error = "File is empty.";
        }
      }
      callback(xhr.responseText, error);
    };
    xhr.open("GET", url, true);
    if (xhr.overrideMimeType) {
      xhr.overrideMimeType("application/json");
    }
    xhr.setRequestHeader("If-Modified-Since", "Fri, 01 Jan 1960 00:00:00 GMT"); // no cache
    xhr.send(null);
  }

  // Resolve one filename into code; when the final source lands,
  // either build the Processing instance or report all failures.
  function loadBlock(index, filename) {
    function onSourceReady(block, error) {
      fetchedCode[index] = block;
      ++finishedCount;
      if (error) {
        failures.push(filename + " ==> " + error);
      }
      if (finishedCount === totalSources) {
        if (failures.length === 0) {
          try {
            return new Processing(canvas, fetchedCode.join("\n"));
          } catch(e) {
            throw "Processing.js: Unable to execute pjs sketch: " + e;
          }
        } else {
          throw "Processing.js: Unable to load pjs sketch files: " + failures.join("\n");
        }
      }
    }
    if (filename.charAt(0) === '#') {
      // trying to get script from the element
      var scriptElement = document.getElementById(filename.substring(1));
      if (scriptElement) {
        onSourceReady(scriptElement.text || scriptElement.textContent);
      } else {
        onSourceReady("", "Unable to load pjs sketch: element with id \'" + filename.substring(1) + "\' was not found");
      }
      return;
    }
    ajaxAsync(filename, onSourceReady);
  }

  for (var i = 0; i < totalSources; ++i) {
    loadBlock(i, sources[i]);
  }
};
/**
* Automatic initialization function.
*/
var init = function() {
document.removeEventListener('DOMContentLoaded', init, false);
var canvas = document.getElementsByTagName('canvas'),
filenames;
for (var i = 0, l = canvas.length; i < l; i++) {
// datasrc and data-src are deprecated.
var processingSources = canvas[i].getAttribute('data-processing-sources');
if (processingSources === null) {
// Temporary fallback for datasrc and data-src
processingSources = canvas[i].getAttribute('data-src');
if (processingSources === null) {
processingSources = canvas[i].getAttribute('datasrc');
}
}
if (processingSources) {
filenames = processingSources.split(' ');
for (var j = 0; j < filenames.length;) {
if (filenames[j]) {
j++;
} else {
filenames.splice(j, 1);
}
}
loadSketchFromSources(canvas[i], filenames);
}
}
// also process all