Here you go. attributeVOP for displace along normals. hipnc ACSII attached.
VOP CVEX Code
//
// Preprocessor header for the dispAlongNormal_tX CVEX op (auto-generated by
// Houdini VOPs). The forum paste had converted every ASCII quote to a
// typographic quote, which vcc rejects; plain quotes are restored below.
#ifndef VOP_OP
#define VOP_OP
#endif
#ifndef VOP_CVEX
#define VOP_CVEX
#endif
#pragma opname dispAlongNormal_tX
#pragma oplabel "Local Vop Code"
#pragma opmininputs 1
#pragma opmaxinputs 1
#pragma label ptnum "Point Number"
#pragma hint ptnum invisible
#pragma range ptnum 0 10
#pragma label vtxnum "Vertex Number"
#pragma hint vtxnum invisible
#pragma range vtxnum 0 10
#pragma label primnum "Primitive Number"
#pragma hint primnum invisible
#pragma range primnum 0 10
#pragma label id Id
#pragma hint id invisible
#pragma range id 0 10
#pragma label numpt "Number of Points"
#pragma hint numpt invisible
#pragma range numpt 0 10
#pragma label numvtx "Number of Vertices"
#pragma hint numvtx invisible
#pragma range numvtx 0 10
#pragma label numprim "Number of Prims"
#pragma hint numprim invisible
#pragma range numprim 0 10
#pragma label Time Time
#pragma hint Time invisible
#pragma range Time 0 1
#pragma label TimeInc "Time Inc"
#pragma hint TimeInc invisible
#pragma range TimeInc 0 1
#pragma label Frame Frame
#pragma hint Frame invisible
#pragma range Frame 0 1
#pragma label life Life
#pragma hint life invisible
#pragma range life 0 1
#pragma label age Age
#pragma hint age invisible
#pragma range age 0 1
#pragma label OpInput2 "Second Input"
#pragma hint OpInput2 invisible
#pragma label OpInput3 "Third Input"
#pragma hint OpInput3 invisible
#pragma label OpInput4 "Fourth Input"
#pragma hint OpInput4 invisible
#pragma label OpInput1 "First Input"
#pragma hint OpInput1 invisible
#pragma label uv UV
#pragma hint uv invisible
#pragma label P P
#pragma hint P invisible
#pragma label v Velocity
#pragma hint v invisible
#pragma label force Force
#pragma hint force invisible
#pragma label Cd Cd
#pragma hint Cd invisible
#pragma label N N
#pragma hint N invisible
#pragma label map "map: Mandril.pic"
#pragma hint map image
#pragma label bias "Interpolation Bias"
#pragma range bias 0 1
#pragma label scale Scale
#pragma range scale -1 1
#pragma label basis "Noise Type"
#pragma choice basis "value_fast" "Value Noise | Fast"
#pragma choice basis "sparse" "Value Noise | Sparse Convolution"
#pragma choice basis "alligator" "Value Noise | Alligator"
#pragma choice basis "perlin" "Perlin"
#pragma choice basis "flow" "Perlin | Flow"
#pragma choice basis "simplex" "Simplex"
#pragma choice basis "worleyFA" "Worley/Cellular | F1"
#pragma choice basis "worleyFB" "Worley/Cellular | F2-F1"
#pragma choice basis "mworleyFA" "Worley/Cellular | Manhattan F1"
#pragma choice basis "mworleyFB" "Worley/Cellular | Manhattan F2-F1"
#pragma choice basis "cworleyFA" "Worley/Cellular | Chebyshev F1"
#pragma choice basis "cworleyFB" "Worley/Cellular | Chebyshev F2-F1"
#pragma label freq Frequency
#pragma label offset Offset
// Duplicate voptype.h/voplib.h includes are harmless: VOP headers are
// include-guarded. Kept as generated.
#include <pyro_utils.h>
#include <pyro_noise.h>
#include <voptype.h>
#include <voplib.h>
#include <voptype.h>
#include <voplib.h>
#include <shaderlayer.h>
#include <voptype.h>
#include <voplib.h>
#include <voptype.h>
#include <voplib.h>
// CVEX displacement shader (auto-generated by Houdini VOPs, "Attribute VOP"
// network): computes a fractal noise value and a texture-map value, blends
// them by 'bias', and displaces P along the normalized N by 'blend * scale'.
// Exports: P (displaced position) and N (displaced normal).
//
// Fixes vs. the forum paste: all typographic quotes restored to ASCII quotes
// (the pasted code would not compile with vcc), including the mangled
// '' / 'streak' literals in the UDIM branch, and the signature tokens in the
// #if !strcmp(...) chain re-quoted to match the correctly quoted tests at the
// dual-rest branch below. Logic is otherwise unchanged.
cvex
obj_geo1_dispAlongNormal_tX(int ptnum = 0;
int vtxnum = 0;
int primnum = 0;
int id = -1;
int numpt = 0;
int numvtx = 0;
int numprim = 0;
float Time = 0;
float TimeInc = 0;
float Frame = 0;
float life = 0;
float age = 0;
string OpInput2 = "";
string OpInput3 = "";
string OpInput4 = "";
string OpInput1 = "";
vector uv = { 0, 0, 0 };
export vector P = { 0, 0, 0 };
vector v = { 0, 0, 0 };
vector force = { 0, 0, 0 };
vector Cd = { 1, 1, 1 };
export vector N = { 0, 0, 0 };
string map = "GProjectsLocal/MayaGP/sourceimages/cobblestone_disp.rat";
float bias = 0.28299999999999997;
float scale = 1;
string basis = "value_fast";
vector4 freq = { 4, 4, 4, 4 };
vector4 offset = { 0, 0, 0, 0 })
{
vector P_tmp;
vector v_tmp;
vector force_tmp;
vector Cd_tmp;
vector uv_tmp;
vector N_tmp;
vector uv_tmp1;
int bound_uv;
vector uv_tmp2;
int bound_uv1;
vector noise;
float x_avg;
float x_oct;
vector x_off;
float output1;
float output2;
float output3;
int bool1;
vector uv3;
vector result;
vector result1;
float fval1;
float fval2;
float fval3;
int bool2;
vector uv5;
vector result2;
vector result3;
float fval11;
float fval21;
float fval31;
vector result4;
vector _uv1;
vector4 clr1;
vector output11;
float fval12;
float fval22;
float fval32;
float scaled;
float blend;
vector dispP;
vector dispN;
// Code produced by: geometryvopglobal1
vector P1 = vector();
vector v1 = vector();
vector force1 = vector();
float age1 = 0.0;
float life1 = 0.0;
int id1 = 0;
vector Cd1 = vector();
vector uv1 = vector();
vector N1 = vector();
float Time1 = 0.0;
float TimeInc1 = 0.0;
float Frame1 = 0.0;
int ptnum1 = 0;
int primnum1 = 0;
int vtxnum1 = 0;
int numpt1 = 0;
int numprim1 = 0;
int numvtx1 = 0;
string OpInput11 = "";
string OpInput21 = "";
string OpInput31 = "";
string OpInput41 = "";
{
// Code produced by: geometryvopglobal1/P
P_tmp = P;
// Code produced by: geometryvopglobal1/v
v_tmp = v;
// Code produced by: geometryvopglobal1/force
force_tmp = force;
// Code produced by: geometryvopglobal1/Cd
Cd_tmp = Cd;
// Code produced by: geometryvopglobal1/uv
uv_tmp = uv;
// Code produced by: geometryvopglobal1/N
N_tmp = N;
// Code produced by: geometryvopglobal1/suboutput1
P1 = P_tmp;
v1 = v_tmp;
force1 = force_tmp;
age1 = age;
life1 = life;
id1 = id;
Cd1 = Cd_tmp;
uv1 = uv_tmp;
N1 = N_tmp;
Time1 = Time;
TimeInc1 = TimeInc;
Frame1 = Frame;
ptnum1 = ptnum;
primnum1 = primnum;
vtxnum1 = vtxnum;
numpt1 = numpt;
numprim1 = numprim;
numvtx1 = numvtx;
OpInput11 = OpInput1;
OpInput21 = OpInput2;
OpInput31 = OpInput3;
OpInput41 = OpInput4;
}
// Code produced by: unifiednoise1
// Signature is "v3" (vector position in, vector noise out); the chain below
// selects rtype/ptype at preprocess time. Tokens re-quoted to compile.
noise = 0;
x_avg = 0;
x_oct = 0;
x_off = 0;
#if !strcmp("v3", "f1")
#define rtype float
#define ptype float
#elif !strcmp("v3", "f2")
#define rtype float
#define ptype vector2
#elif !strcmp("v3", "default")
#define rtype float
#define ptype vector
#elif !strcmp("v3", "f4")
#define rtype float
#define ptype vector4
#elif !strcmp("v3", "fd")
#define rtype float
#define ptype vector
#elif !strcmp("v3", "fd4")
#define rtype float
#define ptype vector4
#elif !strcmp("v3", "v1")
#define rtype vector
#define ptype float
#elif !strcmp("v3", "v2")
#define rtype vector
#define ptype vector2
#elif !strcmp("v3", "v3")
#define rtype vector
#define ptype vector
#elif !strcmp("v3", "v4")
#define rtype vector
#define ptype vector4
#elif !strcmp("v3", "vd")
#define rtype vector
#define ptype vector
#elif !strcmp("v3", "vd4")
#define rtype vector
#define ptype vector4
#endif
if(1)
{
float dfreq = 1;
if(1)
dfreq = (int)1;
#if !strcmp("v3", "fd") || !strcmp("v3", "vd")
int dual = 1;
ptype p1 = (ptype)(P1.rest * freq - offset);
ptype p2 = (ptype)(P1.rest2 * freq - offset);
float kp1 = P1.rest_ratio;
float kp2 = P1.rest2_ratio;
#elif !strcmp("v3", "fd4") || !strcmp("v3", "vd4")
int dual = 1;
ptype p1 = (ptype)P1.rest;
ptype p2 = (ptype)P1.rest2;
p1.w = P1.time;
p2.w = P1.time;
p1 = p1 * freq - offset;
p2 = p2 * freq - offset;
float kp1 = P1.rest_ratio;
float kp2 = P1.rest2_ratio;
#else
int dual = 0;
ptype p1 = (ptype)(P1 * freq - offset);
ptype p2 = (ptype)p1;
float kp1 = 1;
float kp2 = 0;
#endif
ptype tperiod = (ptype) { 10, 10, 10, 10 };
if(dual) {
}
#if 0
float fwidth1 = 0.0;
float fwidth2 = 0.0;
#else
float fwidth1 = VOPFW(p1);
float fwidth2 = dual ? VOPFW(p2) : fwidth1;
#endif
// unified_noise parms which we don't expose, since they're sort
// of covered by the output correction
int inv = 0;
float expon = 1.0;
float fmax = max(freq);
float lw = 0*fmax;
float fs1 = fwidth1 * 1;
float fs2 = fwidth2 * 1;
float e = max(0,expon);
rtype n1=0, n2=0;
// Periodic-noise variant is requested by prefixing the basis name with "p".
string callname = basis;
if (1)
callname = "p" + callname;
if("hmfT" == "none")
{
if(kp1>0) n1 = unified_noise(callname, p1,inv,0,0,0, fs1,e,tperiod,
0,0, lw,dfreq,0,0,
x_avg,x_off);
if(kp2!=0) n2 = unified_noise(callname, p2,inv,0,0,0, fs2,e,tperiod,
0,0, lw,dfreq,0,0,
x_avg,x_off);
}
else
{
//printf("%s\n", basis);
if(kp1>0) n1 = unified_fractal_noise("hmfT", callname, p1,inv,0,0,0, fs1,e,tperiod,
0,0, lw,dfreq,0,0,
x_avg,x_off, //export args
3.1000000000000001,2.01234,0.24399999999999999, // fractal args
x_oct); // fractal export args
if(kp2!=0) n2 = unified_fractal_noise("hmfT", callname, p2,inv,0,0,0, fs2,e,tperiod,
0,0, lw,dfreq,0,0,
x_avg,x_off, // export args
3.1000000000000001,2.01234,0.24399999999999999, // fractal args
x_oct); // fractal export args
}
noise = n1*kp1 + n2*kp2;
noise = noise_cc(noise, 0,
0,{ 0.5, 0.5, 0.5 },
0,{ 0.5, 0.5, 0.5 },
0,{ 0, 0, 0 },{ 1, 1, 1 }, { 0.23000000000000001, 0.23000000000000001, 0.23000000000000001 });
}
#undef rtype
#undef ptype
// Code produced by: autoconvert
vop_vectofloat(noise, output1, output2, output3);
// Code produced by: uvcoords1
string _mode = "uv";
vector uv2 = vector();
float u = 0.0;
float v2 = 0.0;
float w = 0.0;
{
// Code produced by: uvcoords1/compare1
bool1 = (_mode == "st");
// Code produced by: uvcoords1/parm1
bound_uv = isbound("uv");
uv_tmp1 = uv;
// Code produced by: uvcoords1/s_t
#ifdef VOP_SHADING
uv3 = set(s, t, 0);
#else
uv3 = {0,0,0};
#endif
// Code produced by: uvcoords1/switch1
if( bound_uv == 0 )
result = uv3;
else
result = uv_tmp1;
// Code produced by: uvcoords1/st_mode
if( bool1 == 0 )
result1 = result;
else
result1 = uv3;
// Code produced by: uvcoords1/vectofloat1
vop_vectofloat(result1, fval1, fval2, fval3);
// Code produced by: uvcoords1/suboutput1
uv2 = result1;
u = fval1;
v2 = fval2;
w = fval3;
}
// Code produced by: texture1
vector _uv = uv2;
vector _duv = vector();
string _map = map;
int _udim = 1;
string _srccolorspace = "auto";
string _wrap = "decal";
string _filter = "gauss";
float _width = 1;
float _blur = 0;
float _pixelblur = 0;
vector4 _border = { 0, 0, 0, 1 };
int _extrapol = 0;
int _interp = 0;
vector4 _defclr = { 0, 0, 0, 0 };
string _channel = "";
int _ptexface = 0;
int _orient = 0;
vector4 clr = vector4();
{
// Code produced by: texture1/uvcoords1
string _mode1 = "uv";
vector uv4 = vector();
float u1 = 0.0;
float v3 = 0.0;
float w1 = 0.0;
{
// Code produced by: texture1/uvcoords1/compare1
bool2 = (_mode1 == "st");
// Code produced by: texture1/uvcoords1/parm1
bound_uv1 = isbound("uv");
uv_tmp2 = uv;
// Code produced by: texture1/uvcoords1/s_t
#ifdef VOP_SHADING
uv5 = set(s, t, 0);
#else
uv5 = {0,0,0};
#endif
// Code produced by: texture1/uvcoords1/switch1
if( bound_uv1 == 0 )
result2 = uv5;
else
result2 = uv_tmp2;
// Code produced by: texture1/uvcoords1/st_mode
if( bool2 == 0 )
result3 = result2;
else
result3 = uv5;
// Code produced by: texture1/uvcoords1/vectofloat1
vop_vectofloat(result3, fval11, fval21, fval31);
// Code produced by: texture1/uvcoords1/suboutput1
uv4 = result3;
u1 = fval11;
v3 = fval21;
w1 = fval31;
}
// Code produced by: texture1/ifconnected1
result4 = 1 != 0 ? _uv : uv4;
// Code produced by: texture1/null1
_uv1 = result4;
// Code produced by: texture1/inline1
if (_map != "") {
#if defined(VOP_SHADING)
int fptex = 0 ? _ptexface : getptextureid();
#else
int fptex = _ptexface;
#endif
string wrapmode = _wrap;
if (_orient & 0x02) { _uv1.y = 1 - _uv1.y; }
if (_orient & 0x04) { float x = _uv1.x; _uv1.x = _uv1.y; _uv1.y = x; }
string _map_udim = _map;
if (_udim) { _map_udim = expand_udim(_uv1.x, _uv1.y, _map); }
if (_map_udim == "") { clr1 = set(_defclr); }
else {
if (_map_udim != _map) {
_uv1.x %= 1.0; _uv1.y %= 1.0;
wrapmode = "streak";
}
if (_orient & 0x01) { _uv1.x = 1 - _uv1.x; }
if (0 != 0) {
vector tduv = 0.5 * _duv;
if (_orient & 0x04) { float x = tduv.x; tduv.x = tduv.y; tduv.y = x; }
clr1 = texture(_map_udim,
_uv1.x - tduv.x, _uv1.y - tduv.y,
_uv1.x + tduv.x, _uv1.y - tduv.y,
_uv1.x + tduv.x, _uv1.y + tduv.y,
_uv1.x - tduv.x, _uv1.y + tduv.y,
"wrap", wrapmode, "filter", _filter,
"width", _width, "border", _border,
"extrapolate", _extrapol, "lerp", _interp,
"channel", _channel, "face", fptex,
"blur", _blur, "pixelblur", _pixelblur,
"srccolorspace", _srccolorspace);
} else {
clr1 = texture(_map_udim, _uv1.x, _uv1.y, "wrap", wrapmode,
"filter", _filter, "width", _width,
"border", _border, "extrapolate", _extrapol,
"channel", _channel, "face", fptex,
"lerp", _interp,
"blur", _blur, "pixelblur", _pixelblur,
"srccolorspace", _srccolorspace);
}
}
} else {
clr1 = set(_defclr);
}
// Code produced by: texture1/suboutput1
clr = clr1;
}
// Code produced by: autoconvert
output11 = (vector)clr;
// Code produced by: vectofloat1
vop_vectofloat(output11, fval12, fval22, fval32);
// Code produced by: mulconst1
scaled = fval12 * 5;
// Code produced by: mix1
#ifdef __vex
blend = lerp(output1, scaled, bias);
#else
blend = mix(output1, scaled, bias);
#endif
// Code produced by: displacenml1
vop_displaceAlongNormal((1 != 0) ? P1 : P,
(0 != 0) ? vector() : normalize(N),
blend, scale, 0, 0,
1, 1, 0, dispP, dispN);
// Code produced by: geometryvopoutput1
vector _P = dispP;
vector _v = vector();
vector _force = vector();
vector _Cd = vector();
vector _N = dispN;
{
// Code produced by: geometryvopoutput1/P
P = _P;
// Code produced by: geometryvopoutput1/N
N = _N;
}
}
Found 431 posts.
Search results Show results as topic list.
Houdini Indie and Apprentice » Turn mesh into height field
- Dave_ah
- 436 posts
- Offline
Houdini Indie and Apprentice » Weird grid coloring behavior in 17.5.173
- Dave_ah
- 436 posts
- Offline
Weird grid coloring behavior in 17.5.173.
As soon as I add geometry to the scene, the grid in scene view becomes impossible to see. If the geometry is deleted or turned off, the grid and GUI return to normal.
Basically I set the colors and Gamma of the GUI as I prefer. The grid is drawn as slightly lighter. As soon as geometry is added or made visible, the grid inverts its color and is lost in the background. Neither Preferences nor Color Settings fix this.
As soon as I add geometry to the scene, the grid in scene view becomes impossible to see. If geometry is deleted or made turned OFF the grid and GUI return to normal.
Basically I set the colors and Gamma of GUI as I prefer. The grid is drawn as slightly lighter. As soon as geometry is added or made visible, the grid inverts its color and is lost in backround. Neither Preferences nor Color Settings fix this.
Work in Progress » Instant Meshes Bridge Sop
- Dave_ah
- 436 posts
- Offline
Dude! This app is amazing. Retopo is not my thing, and watching younglings working with ZBrush, Mudbox, 3DCoat, or by hand in Maya, the retopo task looks about as much fun as gluing salt onto crackers. But this is frigging cosmic. R U 1 of the authors?
Work in Progress » Water turbidity , boiling, and churning WIP
- Dave_ah
- 436 posts
- Offline
A sim and look dev of boiling , churning water , with moderate turbidity.
GL Flipbook of surface mesh
https://www.youtube.com/watch?v=ypsxzVJy_9M [www.youtube.com]
Base look and color.
https://www.youtube.com/watch?v=eCQqGti2kY8 [www.youtube.com]
Simulation is a FLIP DOP with narrow band option. Vorticity attribute is driving the foaminess amount. Velocity attribute drives motion blur. The scene is lit with single HDR IBL, and one parallel source as sunlight.
I get that the preferred way of doing foam is to sample them from simulated core, then render those particles as points. Which I may do on subsequent versions. However hooking up vorticity to diffuse and color channels of shader, is not a bad way to get decent results without having to do another sim. Even though foam particles are not really simulated, but are sampled from core and get their velocity from it.
Everything was done in Houdini, and rendered in Mantra. Camera has filmback of ARRI Alexa. Rendered at 2048 X 871. 35mm with 2:351 aspect. Full 32 float EXR with autocrop. About 10 minutes per frame. Scene and sim are at 60FPS.
List of elements to add;
Steam rising from hot liquid.
Sim cook time. 1 minute per frame for 600 frames
Surfacing time. Between 1.5 and 2 minutes per frame for 600 frames
Mantra render time. 10-12 minutes per frame for 550 frames.
Cheers and G*d Bless.
GL Flipbook of surface mesh
https://www.youtube.com/watch?v=ypsxzVJy_9M [www.youtube.com]
Base look and color.
https://www.youtube.com/watch?v=eCQqGti2kY8 [www.youtube.com]
Simulation is a FLIP DOP with narrow band option. Vorticity attribute is driving the foaminess amount. Velocity attribute drives motion blur. The scene is lit with single HDR IBL, and one parallel source as sunlight.
I get that the preferred way of doing foam is to sample them from simulated core, then render those particles as points. Which I may do on subsequent versions. However hooking up vorticity to diffuse and color channels of shader, is not a bad way to get decent results without having to do another sim. Even though foam particles are not really simulated, but are sampled from core and get their velocity from it.
Everything was done in Houdini, and rendered in Mantra. Camera has filmback of ARRI Alexa. Rendered at 2048 X 871. 35mm with 2:351 aspect. Full 32 float EXR with autocrop. About 10 minutes per frame. Scene and sim are at 60FPS.
List of elements to add;
Steam rising from hot liquid.
Sim cook time. 1 minute per frame for 600 frames
Surfacing time. Between 1.5 and 2 minutes per frame for 600 frames
Mantra render time. 10-12 minutes per frame for 550 frames.
Cheers and G*d Bless.
Technical Discussion » Krakatoa PRT Reader and PRT Writing SOP/ROP for Houdini 17 and 17.5 . Is it opensource, and where to find it?
- Dave_ah
- 436 posts
- Offline
Yeah. Sadly the version I need it for is build 17.0.459.
Reason I like PRT and Krakatoa, is that Krakatoa does not care where PRT wedges from. Only that they adhere to PRT format and PP attributes on each particle adhere to PRT description. PRT wedges from Houdini, will render on any platform where Krakatoa runs from, or in Krakatoa standalone. Maya particles, when exported by Krakatoa Maya plugin, will render same in KrakatoaMX, KrakatoaMY, or Krakatoa StandAlone. My main interest in Houdini Krakatoa is is that Realflow is a very good generator for PRT wedges, and is still part of my toolkit. With all due reverence and respect to Houdini Particle Mesher SOP, Realflow Mesher, and Thinkbox's Frost are superior to built-in mesh tools in Houdini. Realflow's mesher has feature that stretches droplet along its velocity vector, which makes meshes much more realistic when rendered.
Reason I like PRT and Krakatoa, is that Krakatoa does not care where PRT wedges from. Only that they adhere to PRT format and PP attributes on each particle adhere to PRT description. PRT wedges from Houdini, will render on any platform where Krakatoa runs from, or in Krakatoa standalone. Maya particles, when exported by Krakatoa Maya plugin, will render same in KrakatoaMX, KrakatoaMY, or Krakatoa StandAlone. My main interest in Houdini Krakatoa is is that Realflow is a very good generator for PRT wedges, and is still part of my toolkit. With all due reverence and respect to Houdini Particle Mesher SOP, Realflow Mesher, and Thinkbox's Frost are superior to built-in mesh tools in Houdini. Realflow's mesher has feature that stretches droplet along its velocity vector, which makes meshes much more realistic when rendered.
Technical Discussion » How to connect random points
- Dave_ah
- 436 posts
- Offline
Technical Discussion » Krakatoa PRT Reader and PRT Writing SOP/ROP for Houdini 17 and 17.5 . Is it opensource, and where to find it?
- Dave_ah
- 436 posts
- Offline
3rd Party » Redshift plugin tips
- Dave_ah
- 436 posts
- Offline
Client installation of Redshift Houdini seems to work fine. It is on Win10 Pro, Quadro P4000, 64GB ram, 8 core (16 thread), Houdini 17.0.459 and Redshift. Works great. Scene view IPR , Render View, or external buffer. No issues. Of course I did not set it up. Downside? When Redshift is cranking, GPU usage spike to nearly 100% as does GPU memory usage. If using Chrome playing music or streaming videos in YouTube or Vimeo, there are random audio pops and frame stutters. Google Earth WebGL (using DX11) in Chrome, and Google Earth Pro performance goes to to single digit FPS or fails to start. A small price to pay. But , man Redshift is fast. Kind a wish that Mantra was GPU'ed. One issue that confuses me abit is OpenCL usage in 3D DCC applications. If Houdini (or Maya) are using OpenCL to cook sims (FLIPs in both), does that mean that CUDA cores on Quadro or GTX are utilized or is OpenCL software only. Redshift states that it does not use OpenCL, but does use CUDA. Here I was thinking that OpenCL is OpenGL but for simulation processing.
Technical Discussion » Krakatoa PRT Reader and PRT Writing SOP/ROP for Houdini 17 and 17.5 . Is it opensource, and where to find it?
- Dave_ah
- 436 posts
- Offline
Would love to be able to read and write Krakatoa PRT particle wedges in H17 and 17.5. Anyone know where to get it?
Krakatoa PRT Reader and PRT Writing SOP/ROP for Houdini 17 and 17.5 . Is it opensource, and where to find it?
Thank you advance and G*d bless.
Krakatoa PRT Reader and PRT Writing SOP/ROP for Houdini 17 and 17.5 . Is it opensource, and where to find it?
Thank you advance and G*d bless.
Technical Discussion » Simple render queue for one machine?
- Dave_ah
- 436 posts
- Offline
On a single machine, you don't really need any kind of queueing software for Hbatch. Just a little memory and trust that no one will touch the box while you are away.
Set up the ROPs as needed. Those any kind suppourted by Houdini.
Fire them off as Render In Backround from ROP Network or from ROPs that in SOP or DOP or COP context. As you fire each one note the Hbatch PID (Process ID) number in Task Manager (under Win10) or TOP( under Linux). Be sure to set USE ALL THREADS EXCEPT ONE so that OS and browser can still work. Under Linux you can also launch with L option if you want to limit thread count on each Hbatch. Under Windows, this is done on the fly after launch. In Task Manager you can set Thread Affinity and Priority of each Hbatch job as needed. Changing either will not crash Hbatch, but will slow it down, or speed it up ,depending on resources available. On multiple core , high thread rigs, with sufficient memory, it is much less of an issue. The Task Manager and TOP provide CPU, memory, storage, and GPU loads. A high load Hbatch , means that is working. Open the destination folders to view output and progress. Set high verbose output so you can troubleshoot in case of error.
The dark downside of this approach is that your system memory and storage amount must be sufficient. To render each Hbatch one at a time, you do need to set up a render order in ROPnetwork. Then RENDER IN BACKROUND or SAVE TO DISK IN BACKGROUND , the LAST ROP only. Hbatch will process the ROPs in order that you setup, starting with top most ROP Output Driver.
Cheers
Set up the ROPs as needed. Those any kind suppourted by Houdini.
Fire them off as Render In Backround from ROP Network or from ROPs that in SOP or DOP or COP context. As you fire each one note the Hbatch PID (Process ID) number in Task Manager (under Win10) or TOP( under Linux). Be sure to set USE ALL THREADS EXCEPT ONE so that OS and browser can still work. Under Linux you can also launch with L option if you want to limit thread count on each Hbatch. Under Windows, this is done on the fly after launch. In Task Manager you can set Thread Affinity and Priority of each Hbatch job as needed. Changing either will not crash Hbatch, but will slow it down, or speed it up ,depending on resources available. On multiple core , high thread rigs, with sufficient memory, it is much less of an issue. The Task Manager and TOP provide CPU, memory, storage, and GPU loads. A high load Hbatch , means that is working. Open the destination folders to view output and progress. Set high verbose output so you can troubleshoot in case of error.
The dark downside of this approach is that your system memory and storage amount must be sufficient. To render each Hbatch one at a time, you do need to set up a render order in ROPnetwork. Then RENDER IN BACKROUND or SAVE TO DISK IN BACKGROUND , the LAST ROP only. Hbatch will process the ROPs in order that you setup, starting with top most ROP Output Driver.
Cheers
Technical Discussion » 3d Volume from Image?
- Dave_ah
- 436 posts
- Offline
Interesting assignment! My initial though is that you will need a model of the mandril's head. Either model it, or get a geometry from TurboSquid or similar. Then using the image of the mandril , adjust the existing geometry to match as close as you are comfortable with.
When geometry is done, give color (Cd) attribute to the geometry using AttributeFromMap SOP. You will need to use front projection as UV.
Now using the geometry with colored points , generate the point cloud using scatter tools, or particle DOPs.
Next, using DOP I/O SOP, import the DOP POP pointcloud into SOP context , in the same Geometry Object where your mandril head geo resides.
Next use AttributeTransfer SOP pipe point, primitive, and UV attributes from source geometry to the point cloud.
Cap the work with null SOP named nullOUT. That location to render ROP.
On to rendering!
At object level set Render parameters to use Render As Points-Spheres with small radius like .001 or as desired.
When geometry is done, give color (Cd) attribute to the geometry using AttributeFromMap SOP. You will need to use front projection as UV.
Now using the geometry with colored points , generate the point cloud using scatter tools, or particle DOPs.
Next, using DOP I/O SOP, import the DOP POP pointcloud into SOP context , in the same Geometry Object where your mandril head geo resides.
Next use AttributeTransfer SOP pipe point, primitive, and UV attributes from source geometry to the point cloud.
Cap the work with null SOP named nullOUT. That location to render ROP.
On to rendering!
At object level set Render parameters to use Render As Points-Spheres with small radius like .001 or as desired.
The Orbolt Smart 3D Asset Store » 3DCoat-Houdini Applinker
- Dave_ah
- 436 posts
- Offline
Seems to load in 17. Even though DL page states compatibility with versions 13 to 15.5. What am I missing? 17 is back compatible. Correct?
The Orbolt Smart 3D Asset Store » 3DCoat-Houdini Applinker
- Dave_ah
- 436 posts
- Offline
Seems to load in 17. Even though DL page states compatibility with versions 13 to 15.5. What am I missing? 17 is back compatible. Correct?
Technical Discussion » H17 : Pyro simluation collision problem ...
- Dave_ah
- 436 posts
- Offline
There is a further approach that I use, as a matter of doctrine, which may or may not be the best way. I avoid, if possible, using substeps higher than 1. If the simulation needs it, I'd rather simulate with 1 substep, but at a higher FPS.
You are probably aware that FLIPs loose their performance advantage over SPH or fluid solvers, if substeps are higher then 1. I find this to be the case in Houdini (up to version 16.5), Maya biFrost (all versions), and Realflow. It is better to sim at 48, or 60 FPS for 24 film/HD, and HD 30FPS. Then to use substeps while keeping sim at 24 or 30 FPS. Granted, this is not a universal ROT . To me sims made FPS 2X of content delivery FPS , looks smoother, more lifelike, organic. Content retains that smoothness when rendered elements are retimed in NUKE for comping at 24FPS. But I am one the few who likes film and HD programming at 48FPS or at 60. Love feature film in Real3D with HFS 48FPS digital projection. However the industry seems to have rejected 3D as a goto standard. Turns out that content that matches FPS to digital display Mhz is optimal for quality. Most digital displays and projectors are at 60Mhz, with prosumer ,pro displays , and venue projectors going to 120, 144, and 240Mhz. My two main panels , on my home rig, are 2560X1440 144Mhz, with third panel being 1920X1080 HD at 60Mhz. Playing content at 60fps or slightly slower 48, on my panels, looks much more organic then 24 or 30 fps.
Example of boiling and churning water simulated at 60FPS.
https://www.youtube.com/watch?v=ypsxzVJy_9M [www.youtube.com]
You are probably aware that FLIPs loose their performance advantage over SPH or fluid solvers, if substeps are higher then 1. I find this to be the case in Houdini (up to version 16.5), Maya biFrost (all versions), and Realflow. It is better to sim at 48, or 60 FPS for 24 film/HD, and HD 30FPS. Then to use substeps while keeping sim at 24 or 30 FPS. Granted, this is not a universal ROT . To me sims made FPS 2X of content delivery FPS , looks smoother, more lifelike, organic. Content retains that smoothness when rendered elements are retimed in NUKE for comping at 24FPS. But I am one the few who likes film and HD programming at 48FPS or at 60. Love feature film in Real3D with HFS 48FPS digital projection. However the industry seems to have rejected 3D as a goto standard. Turns out that content that matches FPS to digital display Mhz is optimal for quality. Most digital displays and projectors are at 60Mhz, with prosumer ,pro displays , and venue projectors going to 120, 144, and 240Mhz. My two main panels , on my home rig, are 2560X1440 144Mhz, with third panel being 1920X1080 HD at 60Mhz. Playing content at 60fps or slightly slower 48, on my panels, looks much more organic then 24 or 30 fps.
Example of boiling and churning water simulated at 60FPS.
https://www.youtube.com/watch?v=ypsxzVJy_9M [www.youtube.com]
Edited by Dave_ah - March 18, 2019 11:49:19
Technical Discussion » H17 : Pyro simluation collision problem ...
- Dave_ah
- 436 posts
- Offline
In DOP's Static Object DOP which is upstream of staticSolver DOP, there are options for this.
Set Display Geometry to OFF.
In RBD Solver tab Volume sub-tab;
Set Collision Guide to ON. Choose your color. I use either blue or yellow.
Set Mode to ‘Volume Sample’
Set Division Method to `By Size'
Set Division Size to a desired value (in meters). In my example it is .225
Set Fix Signs to ON
Set Force Bounds to OFF.
Set Proxy Volume to desired location. In my example it is set to ‘/obj/OgreBotFX/ogreBot/VDB’. H17 should set this automatically for u.
Experiment with other settings in volume sub-tab. For complex collider , you may want to cache the VDB and read back using File Mode setting.
Link below to YT vid showing VDB collision volumes sampled from arbitrary animated geometry.
Good luck and cheers.
Houdini used was V17 Apprentice. 17.0.459
https://www.youtube.com/watch?v=-atR755StoM [www.youtube.com]
Set Display Geometry to OFF.
In RBD Solver tab Volume sub-tab;
Set Collision Guide to ON. Choose your color. I use either blue or yellow.
Set Mode to ‘Volume Sample’
Set Division Method to `By Size'
Set Division Size to a desired value (in meters). In my example it is .225
Set Fix Signs to ON
Set Force Bounds to OFF.
Set Proxy Volume to desired location. In my example it is set to ‘/obj/OgreBotFX/ogreBot/VDB’. H17 should set this automatically for u.
Experiment with other settings in volume sub-tab. For complex collider , you may want to cache the VDB and read back using File Mode setting.
Link below to YT vid showing VDB collision volumes sampled from arbitrary animated geometry.
Good luck and cheers.
Houdini used was V17 Apprentice. 17.0.459
https://www.youtube.com/watch?v=-atR755StoM [www.youtube.com]
Edited by Dave_ah - March 18, 2019 09:12:23
Technical Discussion » H17 : Pyro simluation collision problem ...
- Dave_ah
- 436 posts
- Offline
Subdivide your walls. Large flat quads just don't make good colliders. I experienced this in Maya and Houdini, with voxels and flips. My rule of thumb, is no polygon in the collider is to be larger then 1 unit of grid in scene. Houdini DOPs are 1 meter based. Maya Dynamics are 1cm. Maya BiFrost is 1 meter. In Houdini I use VDB volume collision. For fast moving fluids, it may help to have a repulsion (a negative attractor) defined by collision geometry, to push FLIP pops and pyro voxels away, as well as collide.
Technical Discussion » FLIP fluid meshing producing moire stair step artifacts
- Dave_ah
- 436 posts
- Offline
https://youtu.be/dW_ibs5OAQ8 [youtu.be]
WIP, very early. Boiling hot liquid, churning with vicious carnivorous extremophile lifeforms (under NDA sorry!). Collision bodies left out of GL flipbook. Each collision body is about size of an adult human male head, for scale reference.
Houdini DOP units and scene units are in Meters. Ambient DOP gravity is -98.065 m/s^2. Shot is at 60fps.
Simulation and surface done in Houdini 17.0.459, via FLIP particle fluid DOP.
Issue at hand: Moire banding and stair-stepping on the output rendering mesh. I am uncertain as to where the moire banding comes from. Is it in the result of the DOP simulation, or an artifact of meshing? Meshing is done by the Particle Fluid Surface SOP.
I tried various settings on smoothing, particle separation (in the SOP), and various voxel sizes in the DOP. But moire banding and stepping are still present in the mesh.
If anyone is familiar with this issue, and possible solutions, please chime in.
URL on top of post.
Thank you and G*d bless.
WIP, very early. Boiling hot liquid, churning with vicious carnivorous extremophile lifeforms (under NDA sorry!). Collision bodies left out of GL flipbook. Each collision body is about size of an adult human male head, for scale reference.
Houdini DOP units and scene units are in Meters. Ambient DOP gravity is -98.065 m/s^2. Shot is at 60fps.
Simulation and surface done in Houdini 17.0.459, via FLIP particle fluid DOP.
Issue at hand: Moire banding and stair-stepping on the output rendering mesh. I am uncertain as to where the moire banding comes from. Is it in the result of the DOP simulation, or an artifact of meshing? Meshing is done by the Particle Fluid Surface SOP.
I tried various settings on smoothing, particle separation (in the SOP), and various voxel sizes in the DOP. But moire banding and stepping are still present in the mesh.
If anyone is familiar with this issue, and possible solutions, please chime in.
URL on top of post.
Thank you and G*d bless.
Edited by Dave_ah - March 14, 2019 09:12:45
Technical Discussion » COP FileIn EXR 16 bit floating point image from MR and Vray
- Dave_ah
- 436 posts
- Offline
I am in a mixed pipeline. Maya, Houdini, Flame, Nuke. Renderers are Maya Mental Ray, Vray. Mantra on Houdini only, but all renderings are done through Maya and we are using Alembic as an exchange, which is working great. Thank you SESI and Autodesk, also Imageworks and ILM.
I am having trouble bringing EXR 16-bit floating point files into COPs. The alpha channel is unrecognized and the FileIn COP reports a file error. Image sequences are out of Maya Mental Ray and Vray. EXRs out of Mantra are OK. I tried uncompressed, default, and RLE compression settings. Here is the weird part. If I just QC the sequence using MPLAY it loads the sequence correctly, except that A is not identified, only C is available. But if the alpha button is pressed on the bottom of MPLAY it displays the alpha. I suspect that COPs are reading the alpha, but not as alpha — as an extended ‘deep’ raster instead. Nuke reads the EXR files just fine.
I am having trouble bringing EXR 16-bit floating point files into COPs. The alpha channel is unrecognized and the FileIn COP reports a file error. Image sequences are out of Maya Mental Ray and Vray. EXRs out of Mantra are OK. I tried uncompressed, default, and RLE compression settings. Here is the weird part. If I just QC the sequence using MPLAY it loads the sequence correctly, except that A is not identified, only C is available. But if the alpha button is pressed on the bottom of MPLAY it displays the alpha. I suspect that COPs are reading the alpha, but not as alpha — as an extended ‘deep’ raster instead. Nuke reads the EXR files just fine.
Houdini Engine for Maya » Houdini Engine for Maya 2015
- Dave_ah
- 436 posts
- Offline
I mean a 4TB limit for a single ABC file. As when Maya BiFrost writes a mesh derived from a BiFrost sim. I don't like single-file-format caches. I like one file per frame with a pad of 4 or 5. name.####.ext. As I do when using ROP_SOP, or Maya's MCX geo cache, or bin sequences in Realflow.
I want to give that FileIn OTL a try. It's so simple, I was not able to think of it.
Thank you.
I want to give that FileIn OTL a try. It's so simple, I was not able to think of it.
Thank you.
Houdini Engine for Maya » namespaces in Maya
- Dave_ah
- 436 posts
- Offline
fileName_v#.XXXX.ext
daverwork_001.0001.exr
Use underscores instead of spaces, and '.' between padding numbers and file type extensions.
daverwork_001.0001.exr
Use underscores instead of spaces, and '.' between padding numbers and file type extensions.
-
- Quick Links