Hi everyone.
I'm working on a project which renders voxel meshes with OgreNext 3.0. I've optimised parts of it to pack voxel definitions more tightly into buffers; for example, I only use 8 bits for each of x, y and z rather than 32-bit floats. Normals and texture coordinates are likewise packed into a few uints, then unpacked in the vertex shader and translated back to regular floating-point values. For example:
Code:
@property( packedVoxels )
    //Normals are stored as an index into this lookup table.
    const float3 FACES_NORMALS[6] = {
        float3( 0, -1,  0 ),
        float3( 0,  1,  0 ),
        float3( 0,  0, -1 ),
        float3( 0,  0,  1 ),
        float3( 1,  0,  0 ),
        float3( -1, 0,  0 ),
    };

    float4 inValues = inVs_vertex;

    //Reinterpret the float vertex data as the packed uints they really are.
    @property( syntax == metal )
        uint original = as_type<uint>( inValues.x );
        uint originalSecond = as_type<uint>( inValues.y );
    @else
        uint original = floatBitsToUint( inValues.x );
        uint originalSecond = floatBitsToUint( inValues.y );
    @end

    //Leftover from the old approach: the lower 29 bits held the mesh-type magic number.
    uint magicNumber = originalSecond & uint(0x1FFFFFFF);
    float3 normVal = float3( 0, 0, 0 );
    #undef inVs_normal
    #define inVs_normal normVal
    #undef inVs_vertex
    #define inVs_vertex inValues

    //Unpack the 10-bit position components.
    int pos_x = int( original & uint(0x3FF) );
    int pos_y = int( (original >> 10) & uint(0x3FF) );
    int pos_z = int( (original >> 20) & uint(0x3FF) );
    @property( offlineVoxels )
        //Offline voxels store their positions offset by 128.
        pos_x -= 128;
        pos_y -= 128;
        pos_z -= 128;
    @end
    inValues.x = float(pos_x) - 0.5;
    inValues.y = float(pos_y) - 0.5;
    inValues.z = float(pos_z) - 0.5;
    @property( voxelTerrain )
        inValues.z -= 4.0;
    @end

    uint norm = uint( (originalSecond >> 29) & uint(0x3) );
    uint ambient = uint( (original >> 30) & uint(0x3) );
    @property( treeVertices )
        //Sway the tree vertices; 'voxel' scales the effect per vertex.
        uint voxel = ( (originalSecond >> 8) & uint(0x3) );
        float sway = float(voxel) / 3.0;
        inValues.y += sin( inValues.y * 600 + passBuf.time ) * 0.05 * sway;
        inValues.z += cos( inValues.z * 600 + passBuf.time ) * 0.10 * sway;
        inValues.x += sin( inValues.x * 600 + passBuf.time ) * 0.05 * sway;
    @end
    normVal = FACES_NORMALS[norm];

    //If the renderable does not require pixel shading, e.g. because it is a
    //shadow caster, these outputs won't always be available.
    @property( !hlms_shadowcaster || !hlms_shadow_uses_depth_texture || alpha_test || exponential_shadow_maps )
        float ambientVal = float(ambient) / 3.0;
        outVs.diffuse = float3( ambientVal, ambientVal, ambientVal );
    @end
@else
    float4 inValues = inVs_vertex;
    #undef inVs_vertex
    #define inVs_vertex inValues
    @property( !hlms_shadowcaster || !hlms_shadow_uses_depth_texture || alpha_test || exponential_shadow_maps )
        outVs.diffuse = float3( 1, 1, 1 );
    @end
    @property( oceanVertices )
        //Animate the ocean surface.
        inVs_vertex.y += sin( inVs_vertex.x * 600 + passBuf.time ) * 0.25;
        inVs_vertex.y += cos( inVs_vertex.z * 600 + passBuf.time ) * 0.25;
        inVs_vertex.x += sin( inVs_vertex.x * 600 + passBuf.time ) * 0.001;
    @end
@end
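For reference, the CPU-side packing that this shader undoes looks roughly like the sketch below (the struct and helper names are made up for illustration; the real code also packs texture coordinates and, for trees, the per-vertex voxel id into the second uint). The masks and shifts mirror the ones in the shader:
Code:
#include <cstdint>
#include <cstring>

//Two floats whose bit patterns actually hold packed uints.
struct PackedVoxelVertex{
    float first;  //10-bit x, y, z positions plus a 2-bit ambient value.
    float second; //Normal index in the upper bits.
};

//Reinterpret a uint's bits as a float without aliasing issues.
static float bitsToFloat( std::uint32_t v ){
    float f;
    std::memcpy( &f, &v, sizeof( f ) );
    return f;
}

PackedVoxelVertex packVoxelVertex( std::uint32_t x, std::uint32_t y, std::uint32_t z,
                                   std::uint32_t ambient, std::uint32_t normalIdx ){
    const std::uint32_t first = ( x & 0x3FFu ) |
                                ( ( y & 0x3FFu ) << 10 ) |
                                ( ( z & 0x3FFu ) << 20 ) |
                                ( ( ambient & 0x3u ) << 30 );
    const std::uint32_t second = ( normalIdx & 0x3u ) << 29;
    return { bitsToFloat( first ), bitsToFloat( second ) };
}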
I recently switched to using the Hlms preprocessor for this logic. Previously I used inline if statements, reading a magic number out of each vertex definition to tell which type of voxel mesh was being rendered:
Code:
#define TERRAIN_MAGIC_NUMBER 0x15FBF7DB
#define VOXELISER_MAGIC_NUMBER 0x15FBF7FB
#define OFFLINE_VOXELISER_MAGIC_NUMBER 0x15FBB7DB

const float3 FACES_NORMALS[6] = {
    float3( 0, -1,  0 ),
    float3( 0,  1,  0 ),
    float3( 0,  0, -1 ),
    float3( 0,  0,  1 ),
    float3( 1,  0,  0 ),
    float3( -1, 0,  0 ),
};

float4 inValues = inVs_vertex;
@property( syntax == metal )
    uint original = as_type<uint>( inValues.x );
    uint originalSecond = as_type<uint>( inValues.y );
@else
    uint original = floatBitsToUint( inValues.x );
    uint originalSecond = floatBitsToUint( inValues.y );
@end

//The magic number identifies which type of voxel mesh this vertex belongs to.
uint magicNumber = originalSecond & uint(0x1FFFFFFF);
float3 normVal = float3( 0, 0, 0 );
#undef inVs_normal
#define inVs_normal normVal
#undef inVs_vertex
#define inVs_vertex inValues

if( magicNumber == uint(TERRAIN_MAGIC_NUMBER) ||
    magicNumber == uint(OFFLINE_VOXELISER_MAGIC_NUMBER) ||
    magicNumber == uint(VOXELISER_MAGIC_NUMBER) ){

    int offset = 0;
    if( magicNumber == uint(OFFLINE_VOXELISER_MAGIC_NUMBER) ){
        offset = 128;
    }
    int pos_x = int( original & uint(0x3FF) ) - offset;
    int pos_y = int( (original >> 10) & uint(0x3FF) ) - offset;
    int pos_z = int( (original >> 20) & uint(0x3FF) ) - offset;
    inValues.x = float(pos_x) - 0.5;
    inValues.y = float(pos_y) - 0.5;
    inValues.z = float(pos_z) - 0.5;
    if( magicNumber == uint(TERRAIN_MAGIC_NUMBER) ){
        inValues.z -= 4.0;
    }
    uint norm = uint( (originalSecond >> 29) & uint(0x3) );
    uint ambient = uint( (original >> 30) & uint(0x3) );
    normVal = FACES_NORMALS[norm];

    //If the renderable does not require pixel shading, e.g. because it is a
    //shadow caster, these outputs won't always be available.
    @property( !hlms_shadowcaster || !hlms_shadow_uses_depth_texture || alpha_test || exponential_shadow_maps )
        float ambientVal = float(ambient) / 3.0;
        outVs.diffuse = float3( ambientVal, ambientVal, ambientVal );
    @end
}else{
    //Anything without a recognised magic number gets the ocean-style wave animation.
    @property( !hlms_shadowcaster || !hlms_shadow_uses_depth_texture || alpha_test || exponential_shadow_maps )
        outVs.diffuse = float3( 1, 1, 1 );
    @end
    inValues.y += sin( inValues.x * 600 + passBuf.time ) * 0.25;
    inValues.y += cos( inValues.z * 600 + passBuf.time ) * 0.25;
    inValues.x += sin( inValues.x * 600 + passBuf.time ) * 0.001;
}
These things are defined as Hlms pieces, which get pulled into the shader pipeline when the templates are parsed.
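Roughly speaking, the piece files live in their own folder which is appended as an extra library when the Hlms implementation is created, something like this (the paths are placeholders, and I'm assuming my HlmsPbsAVCustom keeps the stock HlmsPbs constructor signature):
Code:
//Register a folder of custom piece files as an extra Hlms library so its
//@piece definitions are parsed alongside the stock PBS templates.
Ogre::ArchiveManager &archiveManager = Ogre::ArchiveManager::getSingleton();
Ogre::Archive *pbsFolder = archiveManager.load( "Hlms/Pbs/GLSL", "FileSystem", true );
Ogre::Archive *customFolder = archiveManager.load( "Hlms/CustomVoxel", "FileSystem", true );

Ogre::ArchiveVec libraries;
libraries.push_back( customFolder );

Ogre::HlmsPbs *hlmsPbs = OGRE_NEW Ogre::HlmsPbsAVCustom( pbsFolder, &libraries );
Ogre::Root::getSingleton().getHlmsManager()->registerHlms( hlmsPbs );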
I override some methods in my HlmsPbs subclass to set the Hlms properties:
Code:
void calculateHashForPreCreate( Ogre::HlmsPbsAVCustom *hlms, Ogre::Renderable *renderable, Ogre::PiecesMap *inOutPieces ){
    assert( dynamic_cast<Ogre::HlmsPbsDatablock *>( renderable->getDatablock() ) );
    Ogre::HlmsPbsDatablock *datablock =
        static_cast<Ogre::HlmsPbsDatablock *>( renderable->getDatablock() );

    //The flag bits are stored in the bit pattern of the user value's x component.
    const Ogre::Vector4 f = datablock->getUserValue( 0 );
    AV::uint32 v = *( reinterpret_cast<const AV::uint32 *>( &f.x ) );

    if( v & ProceduralExplorationGameCore::HLMS_PACKED_VOXELS ){
        hlms->setProperty( "packedVoxels", true );
    }
    if( v & ProceduralExplorationGameCore::HLMS_TERRAIN ){
        hlms->setProperty( "voxelTerrain", true );
    }
    if( v & ProceduralExplorationGameCore::HLMS_PACKED_OFFLINE_VOXELS ){
        hlms->setProperty( "offlineVoxels", true );
    }
    if( v & ProceduralExplorationGameCore::HLMS_OCEAN_VERTICES ){
        hlms->setProperty( "oceanVertices", true );
    }
    if( v & ProceduralExplorationGameCore::HLMS_TREE_VERTICES ){
        hlms->setProperty( "treeVertices", true );
    }
}
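That helper is called from the overridden virtual in my HlmsPbs subclass, roughly like this sketch (the exact wiring in my codebase differs slightly):
Code:
//Forward the Hlms hook to the helper above, then let HlmsPbs set up its
//usual properties for the renderable.
void Ogre::HlmsPbsAVCustom::calculateHashForPreCreate( Ogre::Renderable *renderable,
                                                       Ogre::PiecesMap *inOutPieces )
{
    ::calculateHashForPreCreate( this, renderable, inOutPieces );
    Ogre::HlmsPbs::calculateHashForPreCreate( renderable, inOutPieces );
}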
I ended up using the datablock's user value to pass these flags in, which might not be the best place to do it, but it worked. I then have to create specific datablocks with the relevant flag bits set to get the correct shaders generated.
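Setting those flags on a datablock looks something like this (a sketch; the point is that the flag bits are stuffed into the bit pattern of the user value's x component, matching the reinterpret_cast above):
Code:
//Store the flag bits in the float's bit pattern so that
//calculateHashForPreCreate can read them back out.
AV::uint32 flags = ProceduralExplorationGameCore::HLMS_PACKED_VOXELS |
                   ProceduralExplorationGameCore::HLMS_TERRAIN;
float packed;
std::memcpy( &packed, &flags, sizeof( packed ) );
datablock->setUserValue( 0, Ogre::Vector4( packed, 0.0f, 0.0f, 0.0f ) );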
My problem is that now when I go to render shadows they don't draw correctly, because these pieces aren't making it into the shadow caster's vertex shader. I'm wondering:
Is the calculateHashForPreCreate function the best place to do this?
Is there a better way to define properties for renderables? (As I'm typing this I have a sinking feeling I should've used calculateHashFor)
Where do the datablock/shaders used to render shadows actually get created? I couldn't find it :')
Thanks!