separating cellular noise ... and start researching on it
@ -0,0 +1,50 @@
|
||||
// Author @patriciogv - 2015 - patriciogonzalezvivo.com
|
||||
|
||||
#ifdef GL_ES
|
||||
precision mediump float;
|
||||
#endif
|
||||
|
||||
uniform vec2 u_resolution;
|
||||
uniform vec2 u_mouse;
|
||||
uniform float u_time;
|
||||
|
||||
// Skew cartesian coordinates into the simplex (triangular) grid space.
// The 1.1547 factor is 2/sqrt(3): it maps each square cell onto a rhombus
// composed of two equilateral triangles.
vec2 skew (vec2 st) {
    vec2 skewed = vec2(0.0);
    skewed.x = 1.1547*st.x;
    skewed.y = st.y+0.5*skewed.x;
    return skewed;
}
|
||||
|
||||
// Split each skewed grid cell into its two equilateral triangles and
// return a barycentric-style coordinate triplet for the triangle
// containing st. Useful for visualizing the simplex grid.
vec3 simplexGrid (vec2 st) {
    vec3 coords = vec3(0.0);
    vec2 cell = fract(skew(st));
    if (cell.x > cell.y) {
        // Lower triangle of the cell.
        coords.xy = 1.0-vec2(cell.x,cell.y-cell.x);
        coords.z = cell.y;
    } else {
        // Upper triangle of the cell.
        coords.yz = 1.0-vec2(cell.x-cell.y,cell.y);
        coords.x = cell.x;
    }
    return fract(coords);
}
|
||||
|
||||
// Step-by-step visualization of the simplex grid construction:
// uncomment each stage in turn to see the plain grid, the skewed grid,
// and the triangular subdivision.
void main() {
    // Normalize fragment coordinates to the 0-1 range.
    vec2 st = gl_FragCoord.xy/u_resolution.xy;
    vec3 color = vec3(0.0);

    // Scale the space so several grid cells are visible.
    st *= 10.;

    // Step 1: show the plain 2D grid.
    color.rg = fract(st);

    // Step 2 (uncomment): show the skewed grid.
    // color.rg = fract(skew(st));

    // Step 3 (uncomment): subdivide the grid into two equilateral triangles.
    // color = simplexGrid(st);

    gl_FragColor = vec4(color,1.0);
}
|
After Width: | Height: | Size: 101 KiB |
After Width: | Height: | Size: 33 KiB |
After Width: | Height: | Size: 67 KiB |
@ -0,0 +1,65 @@
|
||||
// Author @patriciogv - 2015
|
||||
|
||||
#ifdef GL_ES
|
||||
precision mediump float;
|
||||
#endif
|
||||
|
||||
uniform vec2 u_resolution;
|
||||
uniform float u_time;
|
||||
|
||||
// Cellular noise ("Worley noise") in 2D in GLSL.
|
||||
// Copyright (c) Stefan Gustavson 2011-04-19. All rights reserved.
|
||||
// This code is released under the conditions of the MIT license.
|
||||
// See LICENSE file for details.
|
||||
|
||||
// Permutation polynomial: (34x^2 + x) mod 289
|
||||
// Component-wise permutation polynomial hash: (34x^2 + x) mod 289.
vec4 permute(vec4 x) {
    return mod(x * (x * 34.0 + 1.0), 289.0);
}
|
||||
|
||||
// Cellular noise, returning F1 and F2 in a vec2.
|
||||
// Speeded up by using 2x2 search window instead of 3x3,
|
||||
// at the expense of some strong pattern artifacts.
|
||||
// F2 is often wrong and has sharp discontinuities.
|
||||
// If you need a smooth F2, use the slower 3x3 version.
|
||||
// F1 is sometimes wrong, too, but OK for most purposes.
|
||||
// Cellular noise, returning F1 and F2 in a vec2.
// Speeded up by using a 2x2 search window instead of 3x3, at the expense
// of some strong pattern artifacts. F2 is often wrong and has sharp
// discontinuities; if you need a smooth F2, use the slower 3x3 version.
// F1 is sometimes wrong, too, but OK for most purposes.
vec2 cellular2x2(vec2 P) {
#define K 0.142857142857 // 1/7
#define K2 0.0714285714285 // K/2
#define jitter 0.8 // jitter 1.0 makes F1 wrong more often
    vec2 Pi = mod(floor(P), 289.0);
    vec2 Pf = fract(P);
    // Fractional offsets to the 4 candidate cells.
    vec4 Pfx = Pf.x + vec4(-0.5, -1.5, -0.5, -1.5);
    vec4 Pfy = Pf.y + vec4(-0.5, -0.5, -1.5, -1.5);
    // Hash the 4 cells.
    vec4 p = permute(Pi.x + vec4(0.0, 1.0, 0.0, 1.0));
    p = permute(p + Pi.y + vec4(0.0, 0.0, 1.0, 1.0));
    // Derive pseudo-random feature-point offsets from the hashes.
    vec4 ox = mod(p, 7.0)*K+K2;
    vec4 oy = mod(floor(p*K),7.0)*K+K2;
    // Vectors from P to each feature point, with jitter applied.
    vec4 dx = Pfx + jitter*ox;
    vec4 dy = Pfy + jitter*oy;
    vec4 d = dx * dx + dy * dy; // d11, d12, d21 and d22, squared
    // Sort out the two smallest distances
#if 0
    // Cheat and pick only F1
    d.xy = min(d.xy, d.zw);
    d.x = min(d.x, d.y);
    return d.xx; // F1 duplicated, F2 not computed
#else
    // Do it right and find both F1 and F2
    d.xy = (d.x < d.y) ? d.xy : d.yx; // Swap if smaller
    d.xz = (d.x < d.z) ? d.xz : d.zx;
    d.xw = (d.x < d.w) ? d.xw : d.wx;
    d.y = min(d.y, d.z);
    d.y = min(d.y, d.w);
    return sqrt(d.xy);
#endif
// Keep the helper macros from leaking into the rest of the file.
#undef K
#undef K2
#undef jitter
}
|
||||
|
||||
// Shade the screen with 2x2 cellular noise: feature points read as
// bright spots on a darker field.
void main(void) {
    // Normalized pixel coordinates, scaled to show several cells.
    vec2 st = gl_FragCoord.xy/u_resolution.xy;
    st *= 10.;

    vec2 dists = cellular2x2(st);

    // Invert and boost F1 so distance-to-feature maps to brightness.
    float shade = 1.0-1.5*dists.x;
    gl_FragColor = vec4(vec3(shade), 1.0);
}
|
@ -0,0 +1,88 @@
|
||||
#ifdef GL_ES
|
||||
precision mediump float;
|
||||
#endif
|
||||
|
||||
uniform vec2 u_resolution;
|
||||
uniform float u_time;
|
||||
|
||||
// Cellular noise ("Worley noise") in 3D in GLSL.
|
||||
// Copyright (c) Stefan Gustavson 2011-04-19. All rights reserved.
|
||||
// This code is released under the conditions of the MIT license.
|
||||
// See LICENSE file for details.
|
||||
|
||||
// Permutation polynomial: (34x^2 + x) mod 289
|
||||
// Component-wise permutation polynomial hash: (34x^2 + x) mod 289.
vec4 permute(vec4 x) {
    return mod(x * (x * 34.0 + 1.0), 289.0);
}
|
||||
// vec3 overload of the same permutation polynomial hash.
vec3 permute(vec3 x) {
    return mod(x * (x * 34.0 + 1.0), 289.0);
}
|
||||
|
||||
// Cellular noise, returning F1 and F2 in a vec2.
|
||||
// Speeded up by using 2x2x2 search window instead of 3x3x3,
|
||||
// at the expense of some pattern artifacts.
|
||||
// F2 is often wrong and has sharp discontinuities.
|
||||
// If you need a good F2, use the slower 3x3x3 version.
|
||||
// 3D cellular noise over a 2x2x2 search window, returning F1 and F2 in
// a vec2. Faster than the full 3x3x3 search at the expense of some
// pattern artifacts: F2 is often wrong and has sharp discontinuities.
// If you need a good F2, use the slower 3x3x3 version.
vec2 cellular2x2x2(vec3 P) {
#define K 0.142857142857 // 1/7
#define Ko 0.428571428571 // 1/2-K/2
#define K2 0.020408163265306 // 1/(7*7)
#define Kz 0.166666666667 // 1/6
#define Kzo 0.416666666667 // 1/2-1/6*2
#define jitter 0.8 // smaller jitter gives less errors in F2
    vec3 Pi = mod(floor(P), 289.0);
    vec3 Pf = fract(P);
    // Fractional offsets to the 4 candidate cells in the xy plane.
    vec4 Pfx = Pf.x + vec4(0.0, -1.0, 0.0, -1.0);
    vec4 Pfy = Pf.y + vec4(0.0, 0.0, -1.0, -1.0);
    // Hash the 4 xy cells, then extend to the two z slices.
    vec4 p = permute(Pi.x + vec4(0.0, 1.0, 0.0, 1.0));
    p = permute(p + Pi.y + vec4(0.0, 0.0, 1.0, 1.0));
    vec4 p1 = permute(p + Pi.z); // z+0
    vec4 p2 = permute(p + Pi.z + vec4(1.0)); // z+1
    // Pseudo-random feature-point offsets derived from each hash.
    vec4 ox1 = fract(p1*K) - Ko;
    vec4 oy1 = mod(floor(p1*K), 7.0)*K - Ko;
    vec4 oz1 = floor(p1*K2)*Kz - Kzo; // p1 < 289 guaranteed
    vec4 ox2 = fract(p2*K) - Ko;
    vec4 oy2 = mod(floor(p2*K), 7.0)*K - Ko;
    vec4 oz2 = floor(p2*K2)*Kz - Kzo;
    // Vectors from P to each of the 8 feature points.
    vec4 dx1 = Pfx + jitter*ox1;
    vec4 dy1 = Pfy + jitter*oy1;
    vec4 dz1 = Pf.z + jitter*oz1;
    vec4 dx2 = Pfx + jitter*ox2;
    vec4 dy2 = Pfy + jitter*oy2;
    vec4 dz2 = Pf.z - 1.0 + jitter*oz2;
    vec4 d1 = dx1 * dx1 + dy1 * dy1 + dz1 * dz1; // z+0, squared
    vec4 d2 = dx2 * dx2 + dy2 * dy2 + dz2 * dz2; // z+1, squared

    // Sort out the two smallest distances (F1, F2)
#if 0
    // Cheat and sort out only F1
    d1 = min(d1, d2);
    d1.xy = min(d1.xy, d1.wz);
    d1.x = min(d1.x, d1.y);
    return sqrt(d1.xx);
#else
    // Do it right and sort out both F1 and F2
    vec4 d = min(d1,d2); // F1 is now in d
    d2 = max(d1,d2); // Make sure we keep all candidates for F2
    d.xy = (d.x < d.y) ? d.xy : d.yx; // Swap smallest to d.x
    d.xz = (d.x < d.z) ? d.xz : d.zx;
    d.xw = (d.x < d.w) ? d.xw : d.wx; // F1 is now in d.x
    d.yzw = min(d.yzw, d2.yzw); // F2 now not in d2.yzw
    d.y = min(d.y, d.z); // nor in d.z
    d.y = min(d.y, d.w); // nor in d.w
    d.y = min(d.y, d2.x); // F2 is now in d.y
    return sqrt(d.xy); // F1 and F2
#endif
// Keep the helper macros from leaking into the rest of the file.
#undef K
#undef Ko
#undef K2
#undef Kz
#undef Kzo
#undef jitter
}
|
||||
|
||||
|
||||
void main(void) {
    // Normalized screen coordinates, scaled to show several cells.
    // NOTE: the stray `varying vec3 vTexCoord3D;` from the original demo
    // was removed — no vertex shader here writes it and nothing reads it.
    vec2 st = gl_FragCoord.xy/u_resolution.xy;

    st *= 10.;
    // Animate the pattern by sliding along the z axis with time.
    vec2 F = cellular2x2x2(vec3(st,u_time));
    // Threshold F1 to carve smooth dark cell interiors.
    float n = smoothstep(0.4, 0.5, F.x);
    gl_FragColor = vec4(n, n, n, 1.0);
}
|
@ -0,0 +1,66 @@
|
||||
// Author @patriciogv - 2015
|
||||
|
||||
#ifdef GL_ES
|
||||
precision mediump float;
|
||||
#endif
|
||||
|
||||
uniform vec2 u_resolution;
|
||||
uniform float u_time;
|
||||
|
||||
// Permutation polynomial: (34x^2 + x) mod 289
|
||||
// Component-wise permutation polynomial hash: (34x^2 + x) mod 289.
vec3 permute(vec3 x) {
    return mod(x * (x * 34.0 + 1.0), 289.0);
}
|
||||
|
||||
// Cellular noise, returning F1 and F2 in a vec2.
|
||||
// Standard 3x3 search window for good F1 and F2 values
|
||||
// Cellular noise, returning F1 and F2 in a vec2.
// Standard 3x3 search window for good F1 and F2 values.
vec2 cellular(vec2 P) {
#define K 0.142857142857 // 1/7
#define Ko 0.428571428571 // 3/7
#define jitter 1.0 // Less gives more regular pattern
    vec2 Pi = mod(floor(P), 289.0);
    vec2 Pf = fract(P);
    vec3 oi = vec3(-1.0, 0.0, 1.0); // integer cell offsets
    vec3 of = vec3(-0.5, 0.5, 1.5); // fractional offsets per column
    vec3 px = permute(Pi.x + oi);
    // Row 1: hash, offsets, and squared distances.
    vec3 p = permute(px.x + Pi.y + oi); // p11, p12, p13
    vec3 ox = fract(p*K) - Ko;
    vec3 oy = mod(floor(p*K),7.0)*K - Ko;
    vec3 dx = Pf.x + 0.5 + jitter*ox;
    vec3 dy = Pf.y - of + jitter*oy;
    vec3 d1 = dx * dx + dy * dy; // d11, d12 and d13, squared
    // Row 2.
    p = permute(px.y + Pi.y + oi); // p21, p22, p23
    ox = fract(p*K) - Ko;
    oy = mod(floor(p*K),7.0)*K - Ko;
    dx = Pf.x - 0.5 + jitter*ox;
    dy = Pf.y - of + jitter*oy;
    vec3 d2 = dx * dx + dy * dy; // d21, d22 and d23, squared
    // Row 3.
    p = permute(px.z + Pi.y + oi); // p31, p32, p33
    ox = fract(p*K) - Ko;
    oy = mod(floor(p*K),7.0)*K - Ko;
    dx = Pf.x - 1.5 + jitter*ox;
    dy = Pf.y - of + jitter*oy;
    vec3 d3 = dx * dx + dy * dy; // d31, d32 and d33, squared
    // Sort out the two smallest distances (F1, F2)
    vec3 d1a = min(d1, d2);
    d2 = max(d1, d2); // Swap to keep candidates for F2
    d2 = min(d2, d3); // neither F1 nor F2 are now in d3
    d1 = min(d1a, d2); // F1 is now in d1
    d2 = max(d1a, d2); // Swap to keep candidates for F2
    d1.xy = (d1.x < d1.y) ? d1.xy : d1.yx; // Swap if smaller
    d1.xz = (d1.x < d1.z) ? d1.xz : d1.zx; // F1 is in d1.x
    d1.yz = min(d1.yz, d2.yz); // F2 is now not in d2.yz
    d1.y = min(d1.y, d1.z); // nor in d1.z
    d1.y = min(d1.y, d2.x); // F2 is in d1.y, we're done.
    return sqrt(d1.xy);
// Keep the helper macros from leaking into the rest of the file.
#undef K
#undef Ko
#undef jitter
}
|
||||
|
||||
// Combine F1 and F2 into a "facets and dots" look: bright ridges along
// cell borders (F2-F1) punched with a dark dot at each feature point.
void main(void) {
    vec2 st = gl_FragCoord.xy/u_resolution.xy;
    st *= 10.;

    vec2 dists = cellular(st); // dists.x = F1, dists.y = F2

    float facets = 0.1+(dists.y-dists.x);          // cell-border ridges
    float dots = smoothstep(0.05, 0.1, dists.x);   // dark feature-point dots
    gl_FragColor = vec4(vec3(facets * dots), 1.0);
}
|
@ -0,0 +1,193 @@
|
||||
// Author @patriciogv - 2015
|
||||
// http://patriciogonzalezvivo.com
|
||||
|
||||
#ifdef GL_ES
|
||||
precision mediump float;
|
||||
#endif
|
||||
|
||||
uniform vec2 u_resolution;
|
||||
uniform float u_time;
|
||||
|
||||
// Cellular noise ("Worley noise") in 3D in GLSL.
|
||||
// Copyright (c) Stefan Gustavson 2011-04-19. All rights reserved.
|
||||
// This code is released under the conditions of the MIT license.
|
||||
// See LICENSE file for details.
|
||||
|
||||
// Permutation polynomial: (34x^2 + x) mod 289
|
||||
// Component-wise permutation polynomial hash: (34x^2 + x) mod 289.
vec3 permute(vec3 x) {
    return mod(x * (x * 34.0 + 1.0), 289.0);
}
|
||||
|
||||
// Cellular noise, returning F1 and F2 in a vec2.
|
||||
// 3x3x3 search region for good F2 everywhere, but a lot
|
||||
// slower than the 2x2x2 version.
|
||||
// The code below is a bit scary even to its author,
|
||||
// but it has at least half decent performance on a
|
||||
// modern GPU. In any case, it beats any software
|
||||
// implementation of Worley noise hands down.
|
||||
|
||||
// 3D cellular noise ("Worley noise"), returning F1 and F2 in a vec2.
// Full 3x3x3 search region for good F2 everywhere, but a lot slower
// than the 2x2x2 version. The code below is a bit scary even to its
// author, but it has at least half decent performance on a modern GPU.
// In any case, it beats any software implementation of Worley noise
// hands down.
vec2 cellular(vec3 P) {
#define K 0.142857142857 // 1/7
#define Ko 0.428571428571 // 1/2-K/2
#define K2 0.020408163265306 // 1/(7*7)
#define Kz 0.166666666667 // 1/6
#define Kzo 0.416666666667 // 1/2-1/6*2
#define jitter 1.0 // smaller jitter gives more regular pattern

    vec3 Pi = mod(floor(P), 289.0);
    vec3 Pf = fract(P) - 0.5;

    // Fractional offsets to the neighbouring cells along each axis.
    vec3 Pfx = Pf.x + vec3(1.0, 0.0, -1.0);
    vec3 Pfy = Pf.y + vec3(1.0, 0.0, -1.0);
    vec3 Pfz = Pf.z + vec3(1.0, 0.0, -1.0);

    // Hash the 27 neighbouring cells, three at a time.
    vec3 p = permute(Pi.x + vec3(-1.0, 0.0, 1.0));
    vec3 p1 = permute(p + Pi.y - 1.0);
    vec3 p2 = permute(p + Pi.y);
    vec3 p3 = permute(p + Pi.y + 1.0);

    vec3 p11 = permute(p1 + Pi.z - 1.0);
    vec3 p12 = permute(p1 + Pi.z);
    vec3 p13 = permute(p1 + Pi.z + 1.0);

    vec3 p21 = permute(p2 + Pi.z - 1.0);
    vec3 p22 = permute(p2 + Pi.z);
    vec3 p23 = permute(p2 + Pi.z + 1.0);

    vec3 p31 = permute(p3 + Pi.z - 1.0);
    vec3 p32 = permute(p3 + Pi.z);
    vec3 p33 = permute(p3 + Pi.z + 1.0);

    // Pseudo-random feature-point offsets derived from each hash.
    vec3 ox11 = fract(p11*K) - Ko;
    vec3 oy11 = mod(floor(p11*K), 7.0)*K - Ko;
    vec3 oz11 = floor(p11*K2)*Kz - Kzo; // p11 < 289 guaranteed

    vec3 ox12 = fract(p12*K) - Ko;
    vec3 oy12 = mod(floor(p12*K), 7.0)*K - Ko;
    vec3 oz12 = floor(p12*K2)*Kz - Kzo;

    vec3 ox13 = fract(p13*K) - Ko;
    vec3 oy13 = mod(floor(p13*K), 7.0)*K - Ko;
    vec3 oz13 = floor(p13*K2)*Kz - Kzo;

    vec3 ox21 = fract(p21*K) - Ko;
    vec3 oy21 = mod(floor(p21*K), 7.0)*K - Ko;
    vec3 oz21 = floor(p21*K2)*Kz - Kzo;

    vec3 ox22 = fract(p22*K) - Ko;
    vec3 oy22 = mod(floor(p22*K), 7.0)*K - Ko;
    vec3 oz22 = floor(p22*K2)*Kz - Kzo;

    vec3 ox23 = fract(p23*K) - Ko;
    vec3 oy23 = mod(floor(p23*K), 7.0)*K - Ko;
    vec3 oz23 = floor(p23*K2)*Kz - Kzo;

    vec3 ox31 = fract(p31*K) - Ko;
    vec3 oy31 = mod(floor(p31*K), 7.0)*K - Ko;
    vec3 oz31 = floor(p31*K2)*Kz - Kzo;

    vec3 ox32 = fract(p32*K) - Ko;
    vec3 oy32 = mod(floor(p32*K), 7.0)*K - Ko;
    vec3 oz32 = floor(p32*K2)*Kz - Kzo;

    vec3 ox33 = fract(p33*K) - Ko;
    vec3 oy33 = mod(floor(p33*K), 7.0)*K - Ko;
    vec3 oz33 = floor(p33*K2)*Kz - Kzo;

    // Vectors from P to each of the 27 feature points, with jitter.
    vec3 dx11 = Pfx + jitter*ox11;
    vec3 dy11 = Pfy.x + jitter*oy11;
    vec3 dz11 = Pfz.x + jitter*oz11;

    vec3 dx12 = Pfx + jitter*ox12;
    vec3 dy12 = Pfy.x + jitter*oy12;
    vec3 dz12 = Pfz.y + jitter*oz12;

    vec3 dx13 = Pfx + jitter*ox13;
    vec3 dy13 = Pfy.x + jitter*oy13;
    vec3 dz13 = Pfz.z + jitter*oz13;

    vec3 dx21 = Pfx + jitter*ox21;
    vec3 dy21 = Pfy.y + jitter*oy21;
    vec3 dz21 = Pfz.x + jitter*oz21;

    vec3 dx22 = Pfx + jitter*ox22;
    vec3 dy22 = Pfy.y + jitter*oy22;
    vec3 dz22 = Pfz.y + jitter*oz22;

    vec3 dx23 = Pfx + jitter*ox23;
    vec3 dy23 = Pfy.y + jitter*oy23;
    vec3 dz23 = Pfz.z + jitter*oz23;

    vec3 dx31 = Pfx + jitter*ox31;
    vec3 dy31 = Pfy.z + jitter*oy31;
    vec3 dz31 = Pfz.x + jitter*oz31;

    vec3 dx32 = Pfx + jitter*ox32;
    vec3 dy32 = Pfy.z + jitter*oy32;
    vec3 dz32 = Pfz.y + jitter*oz32;

    vec3 dx33 = Pfx + jitter*ox33;
    vec3 dy33 = Pfy.z + jitter*oy33;
    vec3 dz33 = Pfz.z + jitter*oz33;

    // Squared distances to all 27 feature points.
    vec3 d11 = dx11 * dx11 + dy11 * dy11 + dz11 * dz11;
    vec3 d12 = dx12 * dx12 + dy12 * dy12 + dz12 * dz12;
    vec3 d13 = dx13 * dx13 + dy13 * dy13 + dz13 * dz13;
    vec3 d21 = dx21 * dx21 + dy21 * dy21 + dz21 * dz21;
    vec3 d22 = dx22 * dx22 + dy22 * dy22 + dz22 * dz22;
    vec3 d23 = dx23 * dx23 + dy23 * dy23 + dz23 * dz23;
    vec3 d31 = dx31 * dx31 + dy31 * dy31 + dz31 * dz31;
    vec3 d32 = dx32 * dx32 + dy32 * dy32 + dz32 * dz32;
    vec3 d33 = dx33 * dx33 + dy33 * dy33 + dz33 * dz33;

    // Sort out the two smallest distances (F1, F2)
#if 0
    // Cheat and sort out only F1
    vec3 d1 = min(min(d11,d12), d13);
    vec3 d2 = min(min(d21,d22), d23);
    vec3 d3 = min(min(d31,d32), d33);
    vec3 d = min(min(d1,d2), d3);
    d.x = min(min(d.x,d.y),d.z);
    return sqrt(d.xx); // F1 duplicated, no F2 computed
#else
    // Do it right and sort out both F1 and F2
    vec3 d1a = min(d11, d12);
    d12 = max(d11, d12);
    d11 = min(d1a, d13); // Smallest now not in d12 or d13
    d13 = max(d1a, d13);
    d12 = min(d12, d13); // 2nd smallest now not in d13
    vec3 d2a = min(d21, d22);
    d22 = max(d21, d22);
    d21 = min(d2a, d23); // Smallest now not in d22 or d23
    d23 = max(d2a, d23);
    d22 = min(d22, d23); // 2nd smallest now not in d23
    vec3 d3a = min(d31, d32);
    d32 = max(d31, d32);
    d31 = min(d3a, d33); // Smallest now not in d32 or d33
    d33 = max(d3a, d33);
    d32 = min(d32, d33); // 2nd smallest now not in d33
    vec3 da = min(d11, d21);
    d21 = max(d11, d21);
    d11 = min(da, d31); // Smallest now in d11
    d31 = max(da, d31); // 2nd smallest now not in d31
    d11.xy = (d11.x < d11.y) ? d11.xy : d11.yx;
    d11.xz = (d11.x < d11.z) ? d11.xz : d11.zx; // d11.x now smallest
    d12 = min(d12, d21); // 2nd smallest now not in d21
    d12 = min(d12, d22); // nor in d22
    d12 = min(d12, d31); // nor in d31
    d12 = min(d12, d32); // nor in d32
    d11.yz = min(d11.yz,d12.xy); // nor in d12.yz
    d11.y = min(d11.y,d12.z); // Only two more to go
    d11.y = min(d11.y,d11.z); // Done! (Phew!)
    return sqrt(d11.xy); // F1, F2
#endif
// Keep the helper macros from leaking into the rest of the file.
#undef K
#undef Ko
#undef K2
#undef Kz
#undef Kzo
#undef jitter
}
|
||||
|
||||
// Animated 3D cellular noise: F2-F1 highlights the borders between cells.
void main(void) {
    vec2 st = gl_FragCoord.xy/u_resolution.xy;
    st *= 10.;

    // Use time as the third dimension to animate the pattern.
    vec2 dists = cellular(vec3(st,u_time));
    float shade = dists.y-dists.x;
    gl_FragColor = vec4(vec3(shade), 1.0);
}
|
@ -1,64 +1,8 @@
|
||||
## Fractal Brownian Motion
|
||||
|
||||
At the end of the previous chapter we were thinking about noise and discovering that in fact noise could be interpreted as an audio signal. In fact, sound can be constructed by manipulating the amplitude and frequency of sine waves.
|
||||
|
||||
```glsl
|
||||
y = amplitude * sin( x * frequency );
|
||||
```
|
||||
|
||||
An interesting property of waves in general is that they can be added up. The following graph shows what happens if you add sine waves of different frequencies and amplitudes.
|
||||
|
||||
<div class="simpleFunction" data="
|
||||
float t = 0.01*(-u_time*130.0);
|
||||
y += sin(x*2.1 + t)*4.5;
|
||||
y += sin(x*1.72 + t*1.121)*4.0;
|
||||
y += sin(x*2.221 + t*0.437)*5.0;
|
||||
y += sin(x*3.1122+ t*4.269)*2.5;
|
||||
y *= 0.1;
|
||||
"></div>
|
||||
|
||||
Think of it as the surface of the ocean: a massive amount of water propagating waves across its surface. Waves of different heights (amplitudes) and rhythms (frequencies) bouncing off and interfering with each other.
|
||||
|
||||
Musicians learned long ago that there are sounds that play well with each other. Those sounds, carried by waves of air, vibrate in such a particular way that the resulting sound seems boosted and enhanced. Those sounds are called [harmonics](http://en.wikipedia.org/wiki/Harmonic).
|
||||
|
||||
Back to code: we can add harmonics together and see what the result looks like. Try the following code on the previous graph.
|
||||
|
||||
```glsl
|
||||
y = 0.;
|
||||
for( int i = 0; i < 5; ++i) {
|
||||
y += sin(PI*x*float(i))/float(i);
|
||||
}
|
||||
y *= 0.6;
|
||||
```
|
||||
|
||||
As you can see in the above code, on every iteration the frequency doubles. By increasing the number of iterations (changing the 5 to a 10, a 20 or 50) the wave tends to break into smaller fractions, with more detail and sharper fluctuations.
|
||||
|
||||
## Fractal Brownian Motion
|
||||
|
||||
So we tried adding different waves together and the result was chaotic; we added up harmonic waves and the result was a consistent fractal pattern. We can use the best of both worlds and add up harmonic noise waves to exacerbate a noise pattern.
|
||||
|
||||
By adding different octaves of increasing frequencies and decreasing amplitudes of noise we can obtain a greater level of detail or granularity. This technique is called Fractal Brownian Motion and usually consists of a fractal sum of noise functions.
|
||||
|
||||
Take a look at the following example and progressively change the for loop to do 2, 3, 4, 5, 6, 7 and 8 iterations. See what happens.
|
||||
|
||||
<div class="simpleFunction" data="
|
||||
float a = 0.5;
|
||||
for( int i = 0; i < 1; ++i) {
|
||||
y += a * noise(x);
|
||||
x = x * 2.0;
|
||||
a *= 0.5;
|
||||
}"></div>
|
||||
|
||||
If we apply this one-dimensional example to a two-dimensional space it will look like the following example:
|
||||
|
||||
<div class="codeAndCanvas" data="2d-fbm.frag"></div>
|
||||
|
||||
## Using Fractal Brownian Motion
|
||||
|
||||
In this [article](http://www.iquilezles.org/www/articles/warp/warp.htm) Iñigo Quilez describes an interesting use of fractal Brownian motion, constructing patterns by adding successive results of fractal Brownian motions.
|
||||
|
||||
Take a look at the code and how it looks.
|
||||
|
||||
<div class="codeAndCanvas" data="clouds.frag"></div>
|
||||
|
||||
https://briansharpe.wordpress.com/2011/12/01/optimized-artifact-free-gpu-cellular-noise/
|
||||
http://www.rhythmiccanvas.com/research/papers/worley.pdf
|
||||
http://webstaff.itn.liu.se/~stegu/GLSL-cellular/GLSL-cellular-notes.pdf
|
||||
|
||||
http://www.iquilezles.org/www/articles/voronoise/voronoise.htm
|
||||
http://www.iquilezles.org/www/articles/smoothvoronoi/smoothvoronoi.htm
|
||||
http://www.iquilezles.org/www/articles/voronoilines/voronoilines.htm
|
@ -1,27 +1,67 @@
|
||||
## Fractals
|
||||
## Fractal Brownian Motion
|
||||
|
||||
https://www.shadertoy.com/view/lsX3W4
|
||||
http://www.iquilezles.org/www/articles/warp/warp.htm
|
||||
http://www.iquilezles.org/www/articles/morenoise/morenoise.htm
|
||||
|
||||
https://www.shadertoy.com/view/Mss3Wf
|
||||
Noise is one of those subjects that you can dig into and always find new exciting formulas. In fact, noise tends to mean different things to different people: musicians will think of audio noise, communicators of interference, and astrophysicists of the cosmic microwave background. In fact, noise can be interpreted as an audio signal, and noise, like sound, can be constructed by manipulating the amplitude and frequency of the waves that compose it.
|
||||
|
||||
https://www.shadertoy.com/view/4df3Rn
|
||||
```glsl
|
||||
y = amplitude * sin( x * frequency );
|
||||
```
|
||||
|
||||
https://www.shadertoy.com/view/Mss3R8
|
||||
An interesting property of waves in general is that they can be add up. The following graph shows what happen if you add sine waves of different frequencies and amplitudes.
|
||||
|
||||
https://www.shadertoy.com/view/4dfGRn
|
||||
<div class="simpleFunction" data="
|
||||
float t = 0.01*(-u_time*130.0);
|
||||
y += sin(x*2.1 + t)*4.5;
|
||||
y += sin(x*1.72 + t*1.121)*4.0;
|
||||
y += sin(x*2.221 + t*0.437)*5.0;
|
||||
y += sin(x*3.1122+ t*4.269)*2.5;
|
||||
y *= 0.1;
|
||||
"></div>
|
||||
|
||||
https://www.shadertoy.com/view/lss3zs
|
||||
Think on it as the surface of the ocean. Massive amount of water propagating waves across it surface. Waves of different heights (amplitud) and rhythms (frequencies) bouncing and interfering each other.
|
||||
|
||||
https://www.shadertoy.com/view/4dXGDX
|
||||
Musicians learn long time ago that there are sounds that play well with each other. Those sound, carried by waves of air, vibrate in such a particular way that the resultan sound seams to be bust and enhance. Those sounds are call [harmonics](http://en.wikipedia.org/wiki/Harmonic).
|
||||
|
||||
https://www.shadertoy.com/view/XsXGz2
|
||||
Back to code, we can add harmonics together and see how the resultant looks like. Try the following code on the previous graph.
|
||||
|
||||
https://www.shadertoy.com/view/lls3D7
|
||||
```glsl
|
||||
y = 0.;
|
||||
for( int i = 0; i < 5; ++i) {
|
||||
y += sin(PI*x*float(i))/float(i);
|
||||
}
|
||||
y *= 0.6;
|
||||
```
|
||||
|
||||
https://www.shadertoy.com/view/XdB3DD
|
||||
As you can see in the above code, on every iteration the frequency increase by the double. By augmenting the number of iterations (chaining the 5 for a 10, a 20 or 50) the wave tends to break into smaller fractions, with more details and sharper fluctuations.
|
||||
|
||||
https://www.shadertoy.com/view/XdBSWw
|
||||
## Fractal Brownian Motion
|
||||
|
||||
So we try adding different waves together, and the result was chaotic, we add up harmonic waves and the result was a consistent fractal pattern. We can use the best of both worlds and add up harmonic noise waves to exacerbate a noise pattern.
|
||||
|
||||
By adding different octaves of increasing frequencies and decreasing amplitudes of noise we can obtain a bigger level of detail or granularity. This technique is call Fractal Brownian Motion and usually consist on a fractal sum of noise functions.
|
||||
|
||||
Take a look to the following example and progressively change the for loop to do 2,3,4,5,6,7 and 8 iterations. See want happens
|
||||
|
||||
<div class="simpleFunction" data="
|
||||
float a = 0.5;
|
||||
for( int i = 0; i < 1; ++i) {
|
||||
y += a * noise(x);
|
||||
x = x * 2.0;
|
||||
a *= 0.5;
|
||||
}"></div>
|
||||
|
||||
If we apply this one dimensional example to a bidimentional space it will look like the following example:
|
||||
|
||||
<div class="codeAndCanvas" data="2d-fbm.frag"></div>
|
||||
|
||||
## Using Fractal Brownian Motion
|
||||
|
||||
In this [article](http://www.iquilezles.org/www/articles/warp/warp.htm) Iñigo Quilez describe an interesting use of fractal brownian motion constructing patterns by adding successive results of fractal brownian motions.
|
||||
|
||||
Take a look to the code and how it looks
|
||||
|
||||
<div class="codeAndCanvas" data="clouds.frag"></div>
|
||||
|
||||
https://www.shadertoy.com/view/llfGD2
|
||||
|
||||
https://www.shadertoy.com/view/Mlf3RX
|
@ -1,72 +1,27 @@
|
||||
# Image processing
|
||||
## Fractals
|
||||
|
||||
## Textures
|
||||
https://www.shadertoy.com/view/lsX3W4
|
||||
|
||||
![](01.jpg)
|
||||
https://www.shadertoy.com/view/Mss3Wf
|
||||
|
||||
Graphics cards (GPUs) have special memory types for images. On CPUs images are usually stored as arrays of bytes, but GPUs store images as ```sampler2D```, which is more like a table (or matrix) of floating point vectors. More interestingly, the values of this *table* of vectors are continuous. That means values between pixels are interpolated at a low level.
|
||||
https://www.shadertoy.com/view/4df3Rn
|
||||
|
||||
In order to use this feature we first need to *upload* the image from the CPU to the GPU, to then pass the ```id``` of the texture to the right [```uniform```](../05). All that happens outside the shader.
|
||||
https://www.shadertoy.com/view/Mss3R8
|
||||
|
||||
Once the texture is loaded and linked to a valid ```uniform sampler2D``` you can ask for the color value at specific coordinates (formatted in a [```vec2```](index.html#vec2.md) variable) using the [```texture2D()```](index.html#texture2D.md) function, which will return a color formatted in a [```vec4```](index.html#vec4.md) variable.
|
||||
https://www.shadertoy.com/view/4dfGRn
|
||||
|
||||
```glsl
|
||||
vec4 texture2D(sampler2D texture, vec2 coordinates)
|
||||
```
|
||||
https://www.shadertoy.com/view/lss3zs
|
||||
|
||||
Check the following code where we load Hokusai's Wave (1830) as ```uniform sampler2D u_tex0``` and we call every pixel of it inside the billboard:
|
||||
https://www.shadertoy.com/view/4dXGDX
|
||||
|
||||
<div class="codeAndCanvas" data="texture.frag" data-imgs="hokusai.jpg"></div>
|
||||
https://www.shadertoy.com/view/XsXGz2
|
||||
|
||||
If you pay attention you will note that the coordinates for the texture are normalized! What a surprise, right? Texture coordinates are consistent with the rest of the things we have seen; their coordinates are between 0.0 and 1.0, which matches perfectly with the normalized space coordinates we have been using.
|
||||
https://www.shadertoy.com/view/lls3D7
|
||||
|
||||
Now that you have seen how to correctly load a texture, it is time to experiment and discover what we can do with it, by trying:
|
||||
https://www.shadertoy.com/view/XdB3DD
|
||||
|
||||
* Scaling the previous texture by half.
* Rotating the previous texture 90 degrees.
* Hooking the mouse position to the coordinates to move it.
|
||||
https://www.shadertoy.com/view/XdBSWw
|
||||
|
||||
Why should you be excited about textures? First of all, forget about the sad 255 values per channel; once your image is transformed into a ```uniform sampler2D``` you have all the values between 0.0 and 1.0 (depending on what you set the ```precision``` to). That's why shaders can make really beautiful post-processing effects.
|
||||
https://www.shadertoy.com/view/llfGD2
|
||||
|
||||
Second, the [```vec2()```](index.html#vec2.md) means you can get values even between pixels. As we said before, textures are a continuum. This means that if you set up your texture correctly you can ask for values all around the surface of your image, and the values will smoothly vary from pixel to pixel with no jumps!
|
||||
|
||||
Finally, you can set up your image to repeat at the edges, so if you give values above or below the normalized 0.0 and 1.0 range, the values will wrap around, starting over.
|
||||
|
||||
All these features make your image more like an infinite spandex fabric. You can stretch and shrink your texture without noticing the grid of bytes it was originally composed of, or where it ends. To experience this, take a look at the following code where we distort a texture using [the noise function we already made](../11/).
|
||||
|
||||
<div class="codeAndCanvas" data="texture-noise.frag" data-imgs="hokusai.jpg"></div>
|
||||
|
||||
## Texture resolution
|
||||
|
||||
The above examples play well with square images, where both sides are equal and match our square billboard. But for non-square images things can be a little more tricky; unfortunately, centuries of pictorial art and photography have found non-square proportions more pleasant to the eye.
|
||||
|
||||
![Joseph Nicéphore Niépce (1826)](nicephore.jpg)
|
||||
|
||||
How can we solve this problem? Well, we need to know the original proportions of the image in order to stretch the texture correctly and preserve its original [*aspect ratio*](http://en.wikipedia.org/wiki/Aspect_ratio). For that, the texture width and height are passed to the shader as a ```uniform```. In our example framework they are passed as a ```uniform vec2``` with the same name as the texture followed by the suffix ```Resolution```. Once we have this information in the shader, we can get the aspect ratio by dividing the ```width``` by the ```height``` of the texture resolution. Finally, by multiplying this ratio by the coordinates on ```y``` we will shrink that axis to match the original proportions.
|
||||
|
||||
Uncomment line 21 of the following code to see this in action.
|
||||
|
||||
<div class="codeAndCanvas" data="texture-resolution.frag" data-imgs="nicephore.jpg"></div>
|
||||
|
||||
* What do we need to do to center this image?
|
||||
|
||||
## Digital upholstery
|
||||
|
||||
![](03.jpg)
|
||||
|
||||
You may be thinking that this is unnecessarily complicated... and you are probably right. But this way of working with images leaves enough room for different hacks and creative tricks. Try to imagine that you are an upholsterer, and that by stretching and folding a fabric over a structure you can create better and new patterns and techniques.
|
||||
|
||||
![Eadweard's Muybridge study of motion](muybridge.jpg)
|
||||
|
||||
This level of craftsmanship links back to some of the first optical experiments ever made. For example, in games *sprite animations* are very common, and it is inevitable to see in them reminiscences of the phenakistoscope, zoetrope and praxinoscope.
|
||||
|
||||
This could seem simple, but the possibilities of modifying texture coordinates are enormous. For example:
|
||||
|
||||
<div class="codeAndCanvas" data="texture-sprite.frag" data-imgs="muybridge.jpg"></div>
|
||||
|
||||
Now is your turn:
|
||||
|
||||
* Can you make a kaleidoscope using what we have learned?
|
||||
* What other optical toys can you re-create using textures?
|
||||
|
||||
In the next chapters we will learn how to do some image processing using shaders. You will note that the complexity of shaders finally makes sense, because they were in a big sense designed to do this type of processing. We will start by doing some image operations!
|
||||
https://www.shadertoy.com/view/Mlf3RX
|
Before Width: | Height: | Size: 66 KiB After Width: | Height: | Size: 55 KiB |
Before Width: | Height: | Size: 33 KiB After Width: | Height: | Size: 218 KiB |
Before Width: | Height: | Size: 177 KiB After Width: | Height: | Size: 105 KiB |
Before Width: | Height: | Size: 80 KiB After Width: | Height: | Size: 40 KiB |
@ -1,18 +1,72 @@
|
||||
## Image operations
|
||||
# Image processing
|
||||
|
||||
## Textures
|
||||
|
||||
### Invert
|
||||
![](01.jpg)
|
||||
|
||||
<div class="codeAndCanvas" data="inv.frag" data-imgs="00.jpg,01.jpg"></div>
|
||||
Graphic cards (GPUs) have special memory types for images. On CPUs images are usually stored as arrays of bytes, but GPUs store images as ```sampler2D```, which is more like a table (or matrix) of floating point vectors. More interestingly, the values of this *table* of vectors are continuous. That means that values between pixels are interpolated at a low level.
|
||||
|
||||
### Add, Substract, Multiply and others
|
||||
In order to use this feature we first need to *upload* the image from the CPU to the GPU, and then pass the ```id``` of the texture to the right [```uniform```](../05). All of that happens outside the shader.
|
||||
|
||||
![](02.jpg)
|
||||
Once the texture is loaded and linked to a valid ```uniform sampler2D``` you can ask for the color value at specific coordinates (formatted as a [```vec2```](index.html#vec2.md) variable) using the [```texture2D()```](index.html#texture2D.md) function, which will return a color formatted as a [```vec4```](index.html#vec4.md) variable.
|
||||
|
||||
<div class="codeAndCanvas" data="operations.frag" data-imgs="00.jpg,01.jpg"></div>
|
||||
```glsl
|
||||
vec4 texture2D(sampler2D texture, vec2 coordinates)
|
||||
```
|
||||
|
||||
### PS Blending modes
|
||||
Check the following code where we load Hokusai's Wave (1830) as ```uniform sampler2D u_tex0``` and we call every pixel of it inside the billboard:
|
||||
|
||||
<div class="codeAndCanvas" data="texture.frag" data-imgs="hokusai.jpg"></div>
|
||||
|
||||
If you pay attention you will note that the coordinates for the texture are normalized! What a surprise, right? Texture coordinates are consistent with the rest of the things we have seen so far: they range between 0.0 and 1.0, which matches perfectly with the normalized space coordinates we have been using.
|
||||
|
||||
Now that you have seen how to correctly load a texture, it is time to experiment and discover what we can do with it, by trying:
|
||||
|
||||
* Scaling the previous texture by half.
|
||||
* Rotating the previous texture 90 degrees.
|
||||
* Hooking the mouse position to the coordinates to move it.
|
||||
|
||||
Why should you be excited about textures? First of all, forget about the sad 255 values per channel; once your image is transformed into a ```uniform sampler2D``` you have all the values between 0.0 and 1.0 (depending on what you set the ```precision``` to). That's why shaders can make really beautiful post-processing effects.
|
||||
|
||||
Second, the [```vec2()```](index.html#vec2.md) coordinates mean you can get values even between pixels. As we said before, textures are a continuum. This means that if you set up your texture correctly you can ask for values all around the surface of your image, and the values will smoothly vary from pixel to pixel with no jumps!
|
||||
|
||||
Finally, you can set up your image to repeat at the edges, so if you give values above or below the normalized 0.0 to 1.0 range, the values will wrap around, starting over.
|
||||
|
||||
All these features make your image feel more like an infinite spandex fabric. You can stretch and shrink your texture without noticing the grid of bytes it was originally composed of, or where it ends. To experience this, take a look at the following code, where we distort a texture using [the noise function we already made](../11/).
|
||||
|
||||
<div class="codeAndCanvas" data="texture-noise.frag" data-imgs="hokusai.jpg"></div>
|
||||
|
||||
## Texture resolution
|
||||
|
||||
The above examples play well with square images, where both sides are equal and match our square billboard. But for non-square images things can be a little more tricky, and unfortunately centuries of pictorial art and photography have found non-square proportions more pleasant to the eye.
|
||||
|
||||
![Joseph Nicéphore Niépce (1826)](nicephore.jpg)
|
||||
|
||||
How can we solve this problem? Well, we need to know the original proportions of the image in order to stretch the texture correctly and preserve its original [*aspect ratio*](http://en.wikipedia.org/wiki/Aspect_ratio). For that, the texture width and height are passed to the shader as a ```uniform```. In our example framework they are passed as a ```uniform vec2``` with the same name as the texture followed by the suffix ```Resolution```. Once we have this information in the shader, we can get the aspect ratio by dividing the ```width``` by the ```height``` of the texture resolution. Finally, by multiplying this ratio by the ```y``` coordinate, we shrink that axis to match the original proportions.
|
||||
|
||||
Uncomment line 21 of the following code to see this in action.
|
||||
|
||||
<div class="codeAndCanvas" data="texture-resolution.frag" data-imgs="nicephore.jpg"></div>
|
||||
|
||||
* What do we need to do to center this image?
|
||||
|
||||
## Digital upholstery
|
||||
|
||||
![](03.jpg)
|
||||
|
||||
<div class="codeAndCanvas" data="blend.frag" data-imgs="04.jpg,05.jpg"></div>
|
||||
You may be thinking that this is unnecessarily complicated... and you are probably right. But this way of working with images leaves enough room for all sorts of hacks and creative tricks. Imagine that you are an upholsterer: by stretching and folding a fabric over a structure you can create new and better patterns and techniques.
|
||||
|
||||
![Eadweard Muybridge's study of motion](muybridge.jpg)
|
||||
|
||||
This level of craftsmanship links back to some of the first optical experiments ever made. For example, in games *sprite animations* are very common, and it is inevitable to see in them reminiscences of the phenakistoscope, zoetrope and praxinoscope.
|
||||
|
||||
This could seem simple, but the possibilities of modifying texture coordinates are enormous. For example:
|
||||
|
||||
<div class="codeAndCanvas" data="texture-sprite.frag" data-imgs="muybridge.jpg"></div>
|
||||
|
||||
Now is your turn:
|
||||
|
||||
* Can you make a kaleidoscope using what we have learned?
|
||||
* What other optical toys can you re-create using textures?
|
||||
|
||||
In the next chapters we will learn how to do some image processing using shaders. You will note that the complexity of shaders finally makes sense, because they were in a big sense designed to do this type of processing. We will start by doing some image operations!
|
Before Width: | Height: | Size: 516 KiB After Width: | Height: | Size: 516 KiB |
Before Width: | Height: | Size: 1.1 MiB After Width: | Height: | Size: 1.1 MiB |
Before Width: | Height: | Size: 148 KiB After Width: | Height: | Size: 148 KiB |
Before Width: | Height: | Size: 37 KiB After Width: | Height: | Size: 37 KiB |
Before Width: | Height: | Size: 86 KiB After Width: | Height: | Size: 86 KiB |
@ -1 +1,18 @@
|
||||
## Kernel convolutions
|
||||
## Image operations
|
||||
|
||||
|
||||
### Invert
|
||||
|
||||
<div class="codeAndCanvas" data="inv.frag" data-imgs="00.jpg,01.jpg"></div>
|
||||
|
||||
### Add, Substract, Multiply and others
|
||||
|
||||
![](02.jpg)
|
||||
|
||||
<div class="codeAndCanvas" data="operations.frag" data-imgs="00.jpg,01.jpg"></div>
|
||||
|
||||
### PS Blending modes
|
||||
|
||||
![](03.jpg)
|
||||
|
||||
<div class="codeAndCanvas" data="blend.frag" data-imgs="04.jpg,05.jpg"></div>
|
@ -1 +1 @@
|
||||
## Filters
|
||||
## Kernel convolutions
|
||||
|
@ -0,0 +1 @@
|
||||
## Filters
|
Before Width: | Height: | Size: 480 KiB After Width: | Height: | Size: 480 KiB |
Before Width: | Height: | Size: 516 KiB After Width: | Height: | Size: 516 KiB |
@ -0,0 +1,32 @@
|
||||
<?php

// Render a chapter's README (optionally localized via ?lan=xx) wrapped in
// the site chrome: header, chapter header, parsed markdown body, navigation
// bar and footer.

$path = "..";
$README = "README";
$language = "";

if ( !empty($_GET['lan']) ) {
    // SECURITY: $_GET['lan'] is untrusted input used to build a file path.
    // Whitelist it to letters and hyphens so values like "../../etc/passwd"
    // cannot be used for path traversal.
    $lan = $_GET['lan'];
    if (preg_match('/^[A-Za-z][A-Za-z-]*$/', $lan) &&
        file_exists($README.'-'.$lan.'.md')) {
        $language = '-'.$lan;
        $README .= $language;
    }
}

include($path."/header.php");
include($path."/chap-header.php");
echo '<div id="content">';

// Convert the (possibly localized) markdown README to HTML.
include($path."/src/parsedown/Parsedown.php");
$Parsedown = new Parsedown();
echo $Parsedown->text(file_get_contents($README.'.md'));

echo '
</div>
<hr>
<ul class="navigationBar" >
    <li class="navigationBar" onclick="previusPage()">< < Previous</li>
    <li class="navigationBar" onclick="homePage()"> Home </li>
    <li class="navigationBar" onclick="nextPage()">Next > ></li>
</ul>';

include($path."/footer.php");
?>
|
Before Width: | Height: | Size: 2.0 KiB After Width: | Height: | Size: 2.0 KiB |
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 17 KiB |
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 13 KiB |
Before Width: | Height: | Size: 15 KiB After Width: | Height: | Size: 15 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 10 KiB After Width: | Height: | Size: 10 KiB |
Before Width: | Height: | Size: 11 KiB After Width: | Height: | Size: 11 KiB |
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 17 KiB |
Before Width: | Height: | Size: 2.0 KiB After Width: | Height: | Size: 2.0 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |