uniform sampler2D DEPTH_TEXTURE : hint_depth_texture, repeat_disable; 
uniform sampler2D SCREEN_TEXTURE : hint_screen_texture, repeat_disable;
uniform float refraction = 0.075;
varying mat4 inv_mvp; 
uniform float absorbance : hint_range(0.0, 10.0) = 2.0;

uniform vec3 shallow_color : source_color = vec3(0.22, 0.66, 1.0);
uniform vec3 deep_color : source_color = vec3(0.0, 0.25, 0.45);

uniform float foam_amount : hint_range(0.0, 2.0) = 0.2;
uniform vec3 foam_color : source_color = vec3(1);
uniform sampler2D caustic_texture : hint_default_white,repeat_enable,filter_linear_mipmap;
uniform sampler2D foam_texture : hint_default_white,repeat_enable,filter_linear_mipmap;

uniform float roughness : hint_range(0.0, 1.0) = 0.05;
uniform float wave_scale = 4.0;

varying vec3 uv_world_pos;

uniform sampler2D normal1 : hint_normal,repeat_enable,filter_linear_mipmap;
uniform vec2 wave_dir1 = vec2(1.0, 0.0);
uniform sampler2D normal2 :hint_normal,repeat_enable,filter_linear_mipmap;
uniform vec2 wave_dir2 = vec2(0.0, 1.0);
uniform float wave_speed : hint_range(0.0, 0.2) = 0.015;

vec3 screen(vec3 base, vec3 blend){
	return 1.0 - (1.0 - base) * (1.0 - blend);
}
void vertex() {
	uv_world_pos = (MODEL_MATRIX * vec4(VERTEX, 1.0)).xyz;
	inv_mvp = inverse(PROJECTION_MATRIX * MODELVIEW_MATRIX);
}

void fragment()
{
	vec2 normal_offset1 = (TIME * wave_dir1) * wave_speed;
	vec2 normal_offset2 = (TIME * wave_dir2) * wave_speed;
	vec3 normal_blend = mix(texture(normal1, uv_world_pos.xz / wave_scale + normal_offset1), texture(normal2, uv_world_pos.xz / wave_scale + normal_offset2), 0.5).rgb;
	vec3 ref_normalmap = normal_blend * 2.0 - 1.0;
	ref_normalmap = normalize(TANGENT * ref_normalmap.x + BINORMAL * ref_normalmap.y + NORMAL *ref_normalmap.z);
	vec2 ref_uv = SCREEN_UV + (ref_normalmap.xy);
	// Reconstruct the view-space depth of the scene behind the (refracted) surface
	float depth_raw = texture(DEPTH_TEXTURE, ref_uv).r * 2.0 - 1.0;
	float depth = PROJECTION_MATRIX[3][2] / (depth_raw + PROJECTION_MATRIX[2][2]);
	// Exponential absorbance falloff with water depth
	float depth_blend = exp((depth + VERTEX.z + absorbance) * -absorbance);
	depth_blend = clamp(1.0 - depth_blend, 0.0, 1.0);
	float depth_blend_pow = clamp(pow(depth_blend, 2.5), 0.0, 1.0);
	// Reconstruct the world-space position of the geometry under this fragment
	// to get the vertical water depth at this pixel
	float depth1 = texture(DEPTH_TEXTURE, SCREEN_UV, 0.0).r;
	vec3 ndc = vec3(SCREEN_UV * 2.0 - 1.0, depth1);
	vec4 world = INV_VIEW_MATRIX * INV_PROJECTION_MATRIX * vec4(ndc, 1.0);
	float depth_texture_y = world.y / world.w;
	float vertex_y = (INV_VIEW_MATRIX * vec4(VERTEX, 1.0)).y;
	float vertical_depth = vertex_y - depth_texture_y;
	
	
	// Makes the water more transparent as it gets more shallow
	float alpha_blend = -vertical_depth * absorbance;
	alpha_blend = clamp(1.0 - exp(alpha_blend), 0.6, 1.0);
	vec2 distortUV = SCREEN_UV + ref_normalmap.xy * refraction;
	vec3 refractiontex = texture(SCREEN_TEXTURE, distortUV).xyz;
	vec3 screen_color = refractiontex;
	
		
	// Small layer of foam
	float foam_blend = clamp(1.0 - (vertical_depth / foam_amount), 0.0,1.0);
	vec4 foamtex = texture(foam_texture,uv_world_pos.xz / wave_scale + normal_offset1 + normal_offset2);
	vec3 foam = foam_blend * foam_color * foamtex.rgb;
	
	// Mix them all together
	vec3 color_out = mix(deep_color, shallow_color, alpha_blend);
	color_out = screen(color_out, foam);
	color_out = mix(color_out, screen_color,alpha_blend);
	mediump float fresnel = sqrt(1.0 - dot(NORMAL, VIEW));
	


	// Set all values:
	ALBEDO = color_out;
	METALLIC = 0.05;
	ROUGHNESS = roughness;
	SPECULAR = 0.2 + depth_blend_pow * fresnel;
	NORMAL_MAP = normal_blend;
	ALPHA = alpha_blend;
	
}

Enable SSR (screen space reflections) in the Environment, or use VoxelGI or another form of GI.
SSR is the fastest option on modern computers.
A PBR material gets its reflections from GI.
But you are using a screen texture, and there are limitations to what you can do with those. I think you need depth data for things to work, but try it.
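
A minimal sketch of turning SSR on from a script, assuming the scene already has a WorldEnvironment (the ssr_* properties are from Godot 4's Environment class):

# Minimal sketch (Godot 4): enable screen space reflections on the active environment.
# Assumes a WorldEnvironment / Environment resource already exists in the scene.
func enable_ssr() -> void:
	var env: Environment = get_viewport().find_world_3d().environment
	env.ssr_enabled = true
	env.ssr_max_steps = 64         # raymarch budget; higher = better quality, slower
	env.ssr_depth_tolerance = 0.2  # how far a ray may pass behind geometry before it is rejected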

    DJMaesen After some testing, SSR doesn't work with refraction, but VoxelGI does, so that's a solution.
    If you want to use the screen texture you have to do your own SSR: raytrace rays from the surface using the color and depth screen textures.
    You need the normal of the surface, which you may or may not have access to,
    and its position in 3D, which I think has to be calculated from VERTEX in the vertex shader, since the water surface doesn't write to the depth buffer.
    Then you raymarch along the reflection direction with a number of samples and use the hit to get the reflection.
    I'm afraid I'm not smart enough to help you with this.

    Jesusemora A PBR material gets its reflections from GI.

    That's only true for fully path-traced renderers and perhaps some hybrid raytracers, neither of which Godot currently is.


    Traditionally, for a planar reflection you set up a second camera that renders a mirrored take of the player camera: it sits on the opposite side of the reflecting plane, at the same distance from it as the player camera, and looks at the scene from the mirrored direction. The render target (a viewport, in Godot terms) from that camera then gets piped as a reflection texture into the material that needs it. In most cases that material would be a custom shader doing some further view/camera-space transforms to make sure the reflection lines up. That's the basic gist of planar reflections from the '90s and mid-aughts. Planar reflections like this tend to be expensive.
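
    As a rough sketch, the scene layout for this in Godot could look something like the following (all node names here are placeholders, not from this thread):

    Main scene
    ├── Camera3D        "PlayerCamera"
    ├── MeshInstance3D  "Water"               (the custom water shader)
    └── SubViewport     "ReflectionViewport"  (render target for the reflection)
        └── Camera3D    "ReflectionCamera"    (mirrored copy of PlayerCamera; see the script further down)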

    Yes, that's the way to do it.

    However, I'm way too incompetent in shader coding to achieve it.

      DJMaesen You can't do it with shaders alone. Why not just use the built-in screen space reflections?

        xyz He's reading from the depth buffer and the screen texture. Refraction is drawn on top of the scene.
        SSR is semi-post-process and reads the depth buffer for reflections.

          DJMaesen However, I'm way too incompetent in shader coding to achieve it.

          In its simplest form, without any further shader-logic spatial corrections, the viewport texture can simply be piped to a texture parameter via script, as in the sketch below. So long as the capture camera is correctly transformed to be a reflection, it'll still work well enough.

          Just remember that the camera transform has to include a negative scale of one along one axis (the Z axis, I want to say, off the top of my head), and it has to be correctly placed and oriented.
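
          A minimal sketch of that piping in GDScript, assuming a SubViewport named "ReflectionViewport" and a water ShaderMaterial with a "reflection_texture" uniform (both names are placeholders):

          # Sketch (placeholder names): feed the reflection viewport's texture into the
          # water shader's reflection uniform once the scene is ready.
          @onready var reflection_viewport: SubViewport = $ReflectionViewport
          @onready var water_material: ShaderMaterial = $Water.get_surface_override_material(0)

          func _ready() -> void:
          	water_material.set_shader_parameter("reflection_texture", reflection_viewport.get_texture())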

            Megalomaniak Sounds like what I need, but I don't have the knowledge for that.
            Why doesn't Godot have a water example with planar reflections, I wonder?

              DJMaesen It's kinda doable. I made a quick test with a reflected camera plus a viewport. The only thing I haven't figured out is how to clip the geometry (without a shader) so that the reflection camera doesn't render underwater pixels. AFAIK Godot's camera doesn't let you define an OpenGL-style arbitrary clipping plane.

              Note that this is somewhat expensive as you need to render the whole scene twice.

                xyz Looks sweet.
                So what would be the best solution, then, to get realtime reflections?

                  DJMaesen Probably adapting your shaders to play nice with Godot's SSR. In case you still want to go with rendered reflections, you can render into a half or quarter resolution viewport, which may speed things up considerably.
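
                  For example, a half-resolution reflection viewport could look like this (a sketch, reusing the placeholder SubViewport from the earlier sketch):

                  func _ready() -> void:
                  	# Sketch: render the reflection at half the window resolution to save fill rate.
                  	reflection_viewport.size = get_window().size / 2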

                    DJMaesen Here's the camera transform code if you want to play with this. It's quite simple. The assumption is that the water plane is always horizontal and its Y position is 0. For an arbitrary plane, the code would be a tiny bit more involved.

                    extends Camera3D
                    
                    @export var cam: Camera3D # assign the main camera to this
                    
                    func _process(delta):
                    	# Mirror the main camera across the water plane (y = 0):
                    	# same position, but with the height flipped...
                    	global_position = cam.global_position
                    	global_position.y = -cam.global_position.y
                    	# ...and the orientation mirrored by flipping the basis' Y axis.
                    	global_transform.basis = cam.global_transform.basis.scaled(Vector3(1, -1, 1))
                    	fov = cam.fov

                    xyz I don't know, maybe camera cull masks or frustum offsets might be handy there? The latter is probably more useful in combination with a shader doing adjustments and corrections though, I suspect.

                    https://docs.godotengine.org/en/stable/classes/class_camera3d.html#class-camera3d-property-cull-mask

                    Note: Since the cull_mask allows for 32 layers to be stored in total, there are an additional 12 layers that are only used internally by the engine and aren't exposed in the editor. Setting cull_mask using a script allows you to toggle those reserved layers, which can be useful for editor plugins.
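
                    As a sketch of that idea: put the first-person arms/gun on their own render layer (say layer 2) and mask that layer out of the reflection camera, so they never show up in the reflection (node path and layer choice are placeholders):

                    func _ready() -> void:
                    	# Sketch: hide render layer 2 (bit index 1) from the reflection camera's cull mask.
                    	$ReflectionViewport/ReflectionCamera.cull_mask &= ~(1 << 1)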

                    This is a water shader with realtime reflections and refraction,
                    however my player arms and gun get reflected too, which is something I don't want.

                    
                    uniform sampler2D DEPTH_TEXTURE : hint_depth_texture, repeat_disable; 
                    uniform sampler2D SCREEN_TEXTURE : hint_screen_texture, repeat_disable;
                    uniform float refraction = 0.075;
                    varying mat4 inv_mvp; 
                    uniform float absorbance : hint_range(0.0, 10.0) = 2.0;
                    
                    uniform vec3 shallow_color : source_color = vec3(0.22, 0.66, 1.0);
                    uniform vec3 deep_color : source_color = vec3(0.0, 0.25, 0.45);
                    
                    uniform float ssr_resolution   : hint_range(0.0, 10.0, 0.1)		= 2.0;
                    uniform float ssr_max_travel   : hint_range(0.0, 200.0, 0.1) 	= 30.0;
                    uniform float ssr_max_diff     : hint_range(0.1, 10.0, 0.1) 	= 4.0;
                    uniform float ssr_mix_strength : hint_range(0.0, 1.0, 0.01) 	= 0.7;
                    
                    
                    
                    uniform float foam_amount : hint_range(0.0, 2.0) = 0.2;
                    uniform vec3 foam_color : source_color = vec3(1);
                    uniform sampler2D caustic_texture : hint_default_white,repeat_enable,filter_linear_mipmap;
                    uniform sampler2D foam_texture : hint_default_white,repeat_enable,filter_linear_mipmap;
                    
                    uniform float roughness : hint_range(0.0, 1.0) = 0.05;
                    uniform float wave_scale = 4.0;
                    
                    varying vec3 uv_world_pos;
                    
                    uniform sampler2D normal1 : hint_normal,repeat_enable,filter_linear_mipmap;
                    uniform vec2 wave_dir1 = vec2(1.0, 0.0);
                    uniform sampler2D normal2 :hint_normal,repeat_enable,filter_linear_mipmap;
                    uniform vec2 wave_dir2 = vec2(0.0, 1.0);
                    uniform float wave_speed : hint_range(0.0, 0.2) = 0.015;
                    
                    vec3 screen(vec3 base, vec3 blend){
                    	return 1.0 - (1.0 - base) * (1.0 - blend);
                    }
                    void vertex() {
                    	uv_world_pos = (MODEL_MATRIX * vec4(VERTEX, 1.0)).xyz;
                    	inv_mvp = inverse(PROJECTION_MATRIX * MODELVIEW_MATRIX);
                    }
                    bool is_within_screen_boundaries(vec2 position) {
                    	return position.x > 0.0 && position.x < 1.0 && position.y > 0.0 && position.y < 1.0;
                    }
                    
                    vec2 get_uv_from_view_position(vec3 position_view_space, mat4 proj_m)
                    {
                    	vec4 position_clip_space = proj_m * vec4(position_view_space.xyz, 1.0);
                    	vec2 position_ndc = position_clip_space.xy / position_clip_space.w;
                    	return position_ndc.xy * 0.5 + 0.5;
                    }
                    
                    vec3 get_view_position_from_uv(vec2 uv, float depth, mat4 inv_proj_m)
                    {
                    	vec4 position_ndc = vec4((uv * 2.0) - 1.0, depth, 1.0);
                    	vec4 view_position = inv_proj_m * position_ndc;
                    	view_position.xyz /= view_position.w;
                    	return view_position.xyz;
                    }
                    
                    vec3 get_ssr_color(vec3 surface_view_position, vec3 normal_view_space, vec3 view_view_space, mat4 proj_m, mat4 inv_proj_m)
                    {
                    	vec3 current_position_view_space = surface_view_position;
                    	vec3 view_direction_view_space = view_view_space * -1.0;
                    	vec3 reflect_vector_view_space = normalize(reflect(view_direction_view_space.xyz, normal_view_space.xyz));
                    	
                    	vec2 current_screen_position = vec2(0.0);
                    	
                    	vec3 resulting_color = vec3(-1.0);
                    	for(float travel=0.0; resulting_color.x < 0.0 && travel < ssr_max_travel; travel = travel + ssr_resolution)
                    	{
                    		current_position_view_space += reflect_vector_view_space * ssr_resolution;
                    		current_screen_position = get_uv_from_view_position(current_position_view_space, proj_m);
                    
                    		float depth_texture_probe_raw = texture(DEPTH_TEXTURE, current_screen_position).x;
                    		vec3 depth_texture_probe_view_position = get_view_position_from_uv(current_screen_position, depth_texture_probe_raw, inv_proj_m);
                    		
                    		float depth_diff = depth_texture_probe_view_position.z - current_position_view_space.z;
                    		
                    		resulting_color = (is_within_screen_boundaries(current_screen_position) && depth_diff >= 0.0 && depth_diff < ssr_max_diff) ? texture(SCREEN_TEXTURE, current_screen_position.xy).rgb : vec3(-1.0);
                    	}
                    	return resulting_color;
                    }
                    
                    void fragment()
                    {
                    	vec2 normal_offset1 = (TIME * wave_dir1) * wave_speed;
                    	vec2 normal_offset2 = (TIME * wave_dir2) * wave_speed;
                    	vec3 normal_blend = mix(texture(normal1, uv_world_pos.xz / wave_scale + normal_offset1), texture(normal2, uv_world_pos.xz / wave_scale + normal_offset2), 0.5).rgb;
                    	vec3 ref_normalmap = normal_blend * 2.0 - 1.0;
                    	ref_normalmap = normalize(TANGENT * ref_normalmap.x + BINORMAL * ref_normalmap.y + NORMAL *ref_normalmap.z);
                    	vec2 ref_uv = SCREEN_UV + (ref_normalmap.xy);
                    	float depth_raw = texture(DEPTH_TEXTURE, ref_uv).r * 2.0 - 1.0;
                    	float depth = PROJECTION_MATRIX[3][2] / (depth_raw + PROJECTION_MATRIX[2][2]);
                    	float depth_blend = exp((depth+VERTEX.z + absorbance) * -absorbance);
                    	depth_blend = clamp(1.0-depth_blend, 0.0, 1.0);
                    	float depth_blend_pow = clamp(pow(depth_blend, 2.5), 0.0, 1.0);
                    	float depth1 = texture(DEPTH_TEXTURE, SCREEN_UV, 0.0).r;
                      	vec3 ndc = vec3(SCREEN_UV * 2.0 - 1.0, depth1);
                    	vec4 world = INV_VIEW_MATRIX * INV_PROJECTION_MATRIX * vec4(ndc, 1.0);
                    	float depth_texture_y = world.y / world.w;
                    	float vertex_y = (INV_VIEW_MATRIX * vec4(VERTEX, 1.0)).y;
                    	float vertical_depth = vertex_y - depth_texture_y;
                    	
                    	
                    	// Makes the water more transparent as it gets more shallow
                    	float alpha_blend = -vertical_depth * absorbance;
                    	alpha_blend = clamp(1.0 - exp(alpha_blend), 0.6, 1.0);
                    	vec2 distortUV = SCREEN_UV + ref_normalmap.xy * refraction;
                    	vec3 refractiontex = texture(SCREEN_TEXTURE, distortUV).xyz;
                    	
                    	float surface_depth = FRAGCOORD.z;
                    	vec3 surface_view_position 	= get_view_position_from_uv(distortUV, surface_depth, INV_PROJECTION_MATRIX);
                    	
                    	vec3 ssr_color = get_ssr_color(surface_view_position, NORMAL, VIEW, PROJECTION_MATRIX, INV_PROJECTION_MATRIX);
                    	vec3 screen_color = refractiontex;
                    	// Small layer of foam
                    	float foam_blend = clamp(1.0 - (vertical_depth / foam_amount), 0.0,1.0);
                    	vec4 foamtex = texture(foam_texture,uv_world_pos.xz / wave_scale + normal_offset1 + normal_offset2);
                    	vec3 foam = foam_blend * foam_color * foamtex.rgb;
                    	
                    	// Mix them all together
                    	vec3 color_out = mix(deep_color, shallow_color, alpha_blend);
                    	color_out = screen(color_out, foam);
                    	color_out = mix(color_out, screen_color,alpha_blend);
                    	vec3 water_color= (ssr_color.x > 0.0) ? mix(color_out, ssr_color, ssr_mix_strength) : color_out;
                    	mediump float fresnel = sqrt(1.0 - dot(NORMAL, VIEW));
                    	
                    
                    
                    	// Set all values:
                    	ALBEDO = water_color;
                    	METALLIC = 0.05;
                    	ROUGHNESS = roughness;
                    	SPECULAR = 0.2 + depth_blend_pow * fresnel;
                    	NORMAL_MAP = normal_blend;
                    	ALPHA = alpha_blend;
                    	
                    }

                      DJMaesen Objects whose shaders read from the screen or depth textures are excluded from reflections. So you can add this to your hand shader:

                      uniform sampler2D t: hint_depth_texture;
                      void vertex() {
                      	texture(t, UV);
                      }

                        DJMaesen This is a water shader with realtime reflections and refraction,
                        however my player arms and gun get reflected too, which is something I don't want.

                        Hunt: Showdown needed a couple of years to do it right 😆

                        Just wanted to say, good thing that you care about this. Crytek didn't for a long time. 🙂