/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
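
/*
 * choose_qparams: computes affine quantization parameters (scale and zero
 * point) from the observed min/max of a floating point input tensor, either
 * over the whole tensor (per_tensor mode) or independently for each token
 * (per_token mode).
 */
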
#version 450 core

#define PRECISION ${PRECISION}

#define IN_T ${buffer_scalar_type(IN_DTYPE)}

${define_active_storage_type("buffer")}
${define_required_extensions(IN_DTYPE)}

#extension GL_EXT_control_flow_attributes : require

layout(std430) buffer;

${layout_declare_tensor(B, "r", "t_in", IN_DTYPE, "buffer")}
${layout_declare_tensor(B, "w", "t_scale", "float", "buffer")}
${layout_declare_tensor(B, "w", "t_zero_point", "int", "buffer")}
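
// Push constants: both modes receive the clamping bounds of the quantized
// range; per_token mode additionally receives the number of tokens so the
// reduction can be partitioned per token.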
$if MODE == "per_tensor":
  layout(push_constant) uniform restrict Block {
    int quant_min;
    int quant_max;
  };
$else:
  layout(push_constant) uniform restrict Block {
    int num_tokens;
    int quant_min;
    int quant_max;
  };

${layout_declare_ubo(B, "ivec4", "t_in_sizes")}
${layout_declare_ubo(B, "ivec4", "t_in_strides")}
${layout_declare_ubo(B, "ivec4", "t_scale_sizes")}
${layout_declare_ubo(B, "ivec4", "t_scale_strides")}
${layout_declare_ubo(B, "ivec4", "t_zero_point_sizes")}
${layout_declare_ubo(B, "ivec4", "t_zero_point_strides")}

#include "indexing_utils.h"
#include "choose_qparams.glslh"

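// The local work group size is supplied at pipeline creation time through
// specialization constants 0, 1, and 2.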
layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;

#define NWORKERS 64

// Shared memory for the min/max reduction. NWORKERS must match the local work
// group size the shader is dispatched with; a larger local size would index
// past the end of these arrays.
shared float shared_min[NWORKERS];
shared float shared_max[NWORKERS];

void main() {
$if MODE == "per_tensor":
  uint global_id = gl_GlobalInvocationID.x;
  uint local_id = gl_LocalInvocationID.x;
  uint group_id = gl_WorkGroupID.x;
  uint total_threads = gl_NumWorkGroups.x * gl_WorkGroupSize.x;

  uint total_elements = uint(t_in_sizes.x * t_in_sizes.y * t_in_sizes.z * t_in_sizes.w);

  // Each thread processes multiple elements, striding by the total thread
  // count. Flat indexing into t_in assumes a contiguous input buffer.
  float thread_min = 1.0/0.0; // +infinity
  float thread_max = -1.0/0.0; // -infinity
  bool found_valid = false;

  for (uint i = global_id; i < total_elements; i += total_threads) {
    float val = t_in[i];
    if (!isnan(val) && !isinf(val)) {
      if (!found_valid) {
        thread_min = val;
        thread_max = val;
        found_valid = true;
      } else {
        thread_min = min(thread_min, val);
        thread_max = max(thread_max, val);
      }
    }
  }

  // Intra-group reduction using shared memory
  shared_min[local_id] = thread_min;
  shared_max[local_id] = thread_max;
  barrier();

  // Tree reduction within the work group; the halving loop assumes
  // gl_WorkGroupSize.x is a power of two.
  for (uint stride = gl_WorkGroupSize.x / 2; stride > 0; stride >>= 1) {
    if (local_id < stride) {
      float other_min = shared_min[local_id + stride];
      float other_max = shared_max[local_id + stride];

      if (!isinf(other_min) && (isinf(shared_min[local_id]) || other_min < shared_min[local_id])) {
        shared_min[local_id] = other_min;
      }
      if (!isinf(other_max) && (isinf(shared_max[local_id]) || other_max > shared_max[local_id])) {
        shared_max[local_id] = other_max;
      }
    }
    barrier();
  }

  // Final result calculation. per_tensor mode must be dispatched with a
  // single work group: there is no cross-group reduction, so thread 0 of that
  // group writes the one scale/zero point pair.
  if (local_id == 0) {
    float global_min = shared_min[0];
    float global_max = shared_max[0];

    float scale_val;
    int zero_point_val;
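    // calculate_scale_and_zero_point() comes from choose_qparams.glslh; it
    // derives an affine scale/zero point pair that maps the observed
    // [min, max] range onto [quant_min, quant_max].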
    calculate_scale_and_zero_point(global_min, global_max, quant_min, quant_max, scale_val, zero_point_val);

    t_scale[0] = scale_val;
    t_zero_point[0] = zero_point_val;
  }

$if MODE == "per_token":
  uint global_id = gl_GlobalInvocationID.x;
  uint local_id = gl_LocalInvocationID.x;
  uint group_id = gl_WorkGroupID.x;
  uint total_workgroups = gl_NumWorkGroups.x;

  uint total_elements = uint(t_in_sizes.x * t_in_sizes.y * t_in_sizes.z * t_in_sizes.w);
  uint token_size = total_elements / uint(num_tokens);
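  // Each token is a contiguous block of token_size elements; the integer
  // division above assumes num_tokens divides total_elements evenly.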

  // Calculate how many tokens each workgroup should process
  // This handles the case where we have more tokens than workgroups
  uint tokens_per_workgroup = (uint(num_tokens) + total_workgroups - 1) / total_workgroups;
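  // Example: num_tokens = 10 with 4 workgroups gives tokens_per_workgroup = 3,
  // so groups 0-2 each take 3 tokens and group 3 takes the remaining 1.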

  // Calculate which tokens this workgroup is responsible for
  uint start_token = group_id * tokens_per_workgroup;
  uint end_token = min(start_token + tokens_per_workgroup, uint(num_tokens));

  // Early exit if this workgroup has no tokens to process
  if (start_token >= uint(num_tokens)) {
    return;
  }
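  // This early return is uniform across the work group (start_token depends
  // only on group_id), so no thread can miss the barriers in the loop below.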

  // Process each token assigned to this workgroup
  for (uint token_id = start_token; token_id < end_token; token_id++) {
    // Calculate the start and end indices for this token
    uint token_start = token_id * token_size;
    uint token_end = token_start + token_size;

    // Each thread processes multiple elements within the token, striding by
    // the work group size
    float thread_min = 1.0/0.0; // +infinity
    float thread_max = -1.0/0.0; // -infinity
    bool found_valid = false;

    // Process elements within this token only
    for (uint i = token_start + local_id; i < token_end; i += gl_WorkGroupSize.x) {
      float val = t_in[i];
      if (!isnan(val) && !isinf(val)) {
        if (!found_valid) {
          thread_min = val;
          thread_max = val;
          found_valid = true;
        } else {
          thread_min = min(thread_min, val);
          thread_max = max(thread_max, val);
        }
      }
    }

    // Intra-group reduction using shared memory
    shared_min[local_id] = thread_min;
    shared_max[local_id] = thread_max;
    barrier();

    // Tree reduction within the work group; the halving loop assumes
    // gl_WorkGroupSize.x is a power of two.
    for (uint stride = gl_WorkGroupSize.x / 2; stride > 0; stride >>= 1) {
      if (local_id < stride) {
        float other_min = shared_min[local_id + stride];
        float other_max = shared_max[local_id + stride];

        if (!isinf(other_min) && (isinf(shared_min[local_id]) || other_min < shared_min[local_id])) {
          shared_min[local_id] = other_min;
        }
        if (!isinf(other_max) && (isinf(shared_max[local_id]) || other_max > shared_max[local_id])) {
          shared_max[local_id] = other_max;
        }
      }
      barrier();
    }

    // Final calculation for this token
    if (local_id == 0) {
      float token_min = shared_min[0];
      float token_max = shared_max[0];

      float scale_val;
      int zero_point_val;
      calculate_scale_and_zero_point(token_min, token_max, quant_min, quant_max, scale_val, zero_point_val);

      t_scale[token_id] = scale_val;
      t_zero_point[token_id] = zero_point_val;
    }

    // Synchronize before processing the next token so the shared arrays can
    // be safely reused.
    barrier();
  }
}