diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8ea1c47b7..e731d95df 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -8,6 +8,11 @@ if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
     set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
 endif()
 
+if (MSVC)
+    add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
+    add_compile_definitions(_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING)
+endif()
+
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
diff --git a/cache_dit.hpp b/cache_dit.hpp
index f5a1f180b..6fe104dac 100644
--- a/cache_dit.hpp
+++ b/cache_dit.hpp
@@ -117,7 +117,7 @@ struct TaylorSeerState {
                 continue;
             if (o > 0)
                 factorial *= static_cast<float>(o);
-            float coeff = std::pow(static_cast<float>(elapsed), o) / factorial;
+            float coeff = ::powf(static_cast<float>(elapsed), static_cast<float>(o)) / factorial;
             for (size_t i = 0; i < size; i++) {
                 output[i] += coeff * dY_prev[o][i];
             }
diff --git a/clip.hpp b/clip.hpp
index 24c94f1bb..7a6ebe9e7 100644
--- a/clip.hpp
+++ b/clip.hpp
@@ -296,7 +296,7 @@ class CLIPTokenizer {
                     size_t max_length = 0,
                     bool padding      = false) {
         if (max_length > 0 && padding) {
-            size_t n = std::ceil(tokens.size() * 1.0 / (max_length - 2));
+            size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.0 / (max_length - 2)));
             if (n == 0) {
                 n = 1;
             }
@@ -525,10 +525,10 @@ struct CLIPLayer : public GGMLBlock {
 
 struct CLIPEncoder : public GGMLBlock {
 protected:
-    int64_t n_layer;
+    int n_layer;
 
 public:
-    CLIPEncoder(int64_t n_layer,
+    CLIPEncoder(int n_layer,
                 int64_t d_model,
                 int64_t n_head,
                 int64_t intermediate_size,
@@ -623,10 +623,10 @@ class CLIPEmbeddings : public GGMLBlock {
 class CLIPVisionEmbeddings : public GGMLBlock {
 protected:
     int64_t embed_dim;
-    int64_t num_channels;
-    int64_t patch_size;
-    int64_t image_size;
-    int64_t num_patches;
+    int num_channels;
+    int patch_size;
+    int image_size;
+    int num_patches;
     int64_t num_positions;
 
     void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
@@ -641,9 +641,9 @@ class CLIPVisionEmbeddings : public GGMLBlock {
 
 public:
     CLIPVisionEmbeddings(int64_t embed_dim,
-                         int64_t num_channels = 3,
-                         int64_t patch_size   = 14,
-                         int64_t image_size   = 224)
+                         int num_channels = 3,
+                         int patch_size   = 14,
+                         int image_size   = 224)
         : embed_dim(embed_dim),
           num_channels(num_channels),
           patch_size(patch_size),
diff --git a/common.hpp b/common.hpp
index b17c11e35..13ab1038e 100644
--- a/common.hpp
+++ b/common.hpp
@@ -80,7 +80,7 @@ class ResBlock : public GGMLBlock {
                                                std::pair<int, int> padding) {
         GGML_ASSERT(dims == 2 || dims == 3);
         if (dims == 3) {
-            return std::shared_ptr<GGMLBlock>(new Conv3dnx1x1(in_channels, out_channels, kernel_size.first, 1, padding.first));
+            return std::shared_ptr<GGMLBlock>(new Conv3d(in_channels, out_channels, {kernel_size.first, 1, 1}, {1, 1, 1}, {padding.first, 0, 0}));
         } else {
             return std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, kernel_size, {1, 1}, padding));
         }
@@ -544,9 +544,9 @@ class AlphaBlender : public GGMLBlock {
 
 class VideoResBlock : public ResBlock {
 public:
-    VideoResBlock(int channels,
-                  int emb_channels,
-                  int out_channels,
+    VideoResBlock(int64_t channels,
+                  int64_t emb_channels,
+                  int64_t out_channels,
                   std::pair<int, int> kernel_size = {3, 3},
                   int64_t video_kernel_size       = 3,
                   int dims                        = 2)  // always 2
diff --git a/conditioner.hpp b/conditioner.hpp
index 45db314b9..b6d5646a7 100644
--- a/conditioner.hpp
+++ b/conditioner.hpp
@@ -303,11 +303,11 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                 int class_token = clean_input_ids[class_token_index[0]];
                 class_idx       = tokens_acc + class_token_index[0];
                 std::vector<int> clean_input_ids_tmp;
-                for (uint32_t i = 0; i < class_token_index[0]; i++)
+                for (int i = 0; i < class_token_index[0]; i++)
                     clean_input_ids_tmp.push_back(clean_input_ids[i]);
-                for (uint32_t i = 0; i < (pm_version == PM_VERSION_2 ? 2 * num_input_imgs : num_input_imgs); i++)
+                for (int i = 0; i < (pm_version == PM_VERSION_2 ? 2 * num_input_imgs : num_input_imgs); i++)
                     clean_input_ids_tmp.push_back(class_token);
-                for (uint32_t i = class_token_index[0] + 1; i < clean_input_ids.size(); i++)
+                for (int i = class_token_index[0] + 1; i < clean_input_ids.size(); i++)
                     clean_input_ids_tmp.push_back(clean_input_ids[i]);
                 clean_input_ids.clear();
                 clean_input_ids = clean_input_ids_tmp;
@@ -322,7 +322,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
         tokenizer.pad_tokens(tokens, weights, max_length, padding);
 
         int offset = pm_version == PM_VERSION_2 ? 2 * num_input_imgs : num_input_imgs;
-        for (uint32_t i = 0; i < tokens.size(); i++) {
+        for (int i = 0; i < tokens.size(); i++) {
             // if (class_idx + 1 <= i && i < class_idx + 1 + 2*num_input_imgs) // photomaker V2 has num_tokens(=2)*num_input_imgs
             if (class_idx + 1 <= i && i < class_idx + 1 + offset)  // photomaker V2 has num_tokens(=2)*num_input_imgs
                                                                    // hardcode for now
@@ -1584,7 +1584,7 @@ struct T5CLIPEmbedder : public Conditioner {
                                                chunk_hidden_states->ne[0],
                                                ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
 
-        modify_mask_to_attend_padding(t5_attn_mask, ggml_nelements(t5_attn_mask), mask_pad);
+        modify_mask_to_attend_padding(t5_attn_mask, static_cast<int>(ggml_nelements(t5_attn_mask)), mask_pad);
 
         return {hidden_states, t5_attn_mask, nullptr};
     }
@@ -1723,8 +1723,8 @@ struct LLMEmbedder : public Conditioner {
             double factor = llm->params.vision.patch_size * llm->params.vision.spatial_merge_size;
             int height    = image.height;
             int width     = image.width;
-            int h_bar     = static_cast<int>(std::round(height / factor)) * factor;
-            int w_bar     = static_cast<int>(std::round(width / factor)) * factor;
+            int h_bar     = static_cast<int>(std::round(height / factor) * factor);
+            int w_bar     = static_cast<int>(std::round(width / factor) * factor);
 
             if (static_cast<int64_t>(h_bar) * w_bar > max_pixels) {
                 double beta = std::sqrt((height * width) / static_cast<double>(max_pixels));
@@ -1752,7 +1752,7 @@ struct LLMEmbedder : public Conditioner {
                 ggml_tensor* image_embed = nullptr;
                 llm->encode_image(n_threads, image_tensor, &image_embed, work_ctx);
                 image_embeds.emplace_back(image_embed_idx, image_embed);
-                image_embed_idx += 1 + image_embed->ne[1] + 6;
+                image_embed_idx += 1 + static_cast<int>(image_embed->ne[1]) + 6;
 
                 img_prompt += "Picture " + std::to_string(i + 1) + ": <|vision_start|>";  // [24669, 220, index, 25, 220, 151652]
                 int64_t num_image_tokens = image_embed->ne[1];
@@ -1799,9 +1799,9 @@ struct LLMEmbedder : public Conditioner {
                 prompt = "[SYSTEM_PROMPT]You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object\nattribution and actions without speculation.[/SYSTEM_PROMPT][INST]";
-                prompt_attn_range.first = prompt.size();
+                prompt_attn_range.first = static_cast<int>(prompt.size());
                 prompt += conditioner_params.text;
-                prompt_attn_range.second = prompt.size();
+                prompt_attn_range.second = static_cast<int>(prompt.size());
                 prompt += "[/INST]";
             } else if (version == VERSION_OVIS_IMAGE) {
diff --git a/denoiser.hpp b/denoiser.hpp
index 2ed62fe9a..98aef702d 100644
--- a/denoiser.hpp
+++ b/denoiser.hpp
@@ -245,7 +245,7 @@ struct SGMUniformScheduler : SigmaScheduler {
         int t_max                    = TIMESTEPS - 1;
         int t_min                    = 0;
         std::vector<float> timesteps = linear_space(static_cast<float>(t_max), static_cast<float>(t_min), n + 1);
-        for (int i = 0; i < n; i++) {
+        for (uint32_t i = 0; i < n; i++) {
             result.push_back(t_to_sigma_func(timesteps[i]));
         }
         result.push_back(0.0f);
@@ -259,11 +259,11 @@ struct LCMScheduler : SigmaScheduler {
         result.reserve(n + 1);
         const int original_steps = 50;
         const int k              = TIMESTEPS / original_steps;
-        for (int i = 0; i < n; i++) {
+        for (uint32_t i = 0; i < n; i++) {
             // the rounding ensures we match the training schedule of the LCM model
             int index    = (i * original_steps) / n;
             int timestep = (original_steps - index) * k - 1;
-            result.push_back(t_to_sigma(timestep));
+            result.push_back(t_to_sigma(static_cast<float>(timestep)));
         }
         result.push_back(0.0f);
         return result;
@@ -525,8 +525,8 @@ struct CompVisVDenoiser : public CompVisDenoiser {
 };
 
 struct EDMVDenoiser : public CompVisVDenoiser {
-    float min_sigma = 0.002;
-    float max_sigma = 120.0;
+    float min_sigma = 0.002f;
+    float max_sigma = 120.0f;
 
     EDMVDenoiser(float min_sigma = 0.002, float max_sigma = 120.0)
         : min_sigma(min_sigma), max_sigma(max_sigma) {
@@ -537,7 +537,7 @@ struct EDMVDenoiser : public CompVisVDenoiser {
     }
 
     float sigma_to_t(float s) override {
-        return 0.25 * std::log(s);
+        return 0.25f * std::log(s);
     }
 
     float sigma_min() override {
@@ -569,7 +569,7 @@ struct DiscreteFlowDenoiser : public Denoiser {
 
     void set_parameters() {
         for (int i = 1; i < TIMESTEPS + 1; i++) {
-            sigmas[i - 1] = t_to_sigma(i);
+            sigmas[i - 1] = t_to_sigma(static_cast<float>(i));
         }
     }
@@ -612,7 +612,7 @@ struct DiscreteFlowDenoiser : public Denoiser {
 };
 
 float flux_time_shift(float mu, float sigma, float t) {
-    return std::exp(mu) / (std::exp(mu) + std::pow((1.0 / t - 1.0), sigma));
+    return ::expf(mu) / (::expf(mu) + ::powf((1.0f / t - 1.0f), sigma));
 }
 
 struct FluxFlowDenoiser : public Denoiser {
@@ -632,7 +632,7 @@ struct FluxFlowDenoiser : public Denoiser {
     void set_parameters(float shift) {
         set_shift(shift);
         for (int i = 0; i < TIMESTEPS; i++) {
-            sigmas[i] = t_to_sigma(i);
+            sigmas[i] = t_to_sigma(static_cast<float>(i));
         }
     }
@@ -1327,15 +1327,12 @@ static bool sample_k_diffusion(sample_method_t method,
                 // - pred_sample_direction -> "direction pointing to
                 //   x_t"
                 // - pred_prev_sample -> "x_t-1"
-                int timestep =
-                    roundf(TIMESTEPS -
-                           i * ((float)TIMESTEPS / steps)) -
-                    1;
+                int timestep = static_cast<int>(roundf(TIMESTEPS - i * ((float)TIMESTEPS / steps))) - 1;
                 // 1. get previous step value (=t-1)
-                int prev_timestep = timestep - TIMESTEPS / steps;
+                int prev_timestep = timestep - TIMESTEPS / static_cast<int>(steps);
                 // The sigma here is chosen to cause the
                 // CompVisDenoiser to produce t = timestep
-                float sigma = compvis_sigmas[timestep];
+                float sigma = static_cast<float>(compvis_sigmas[timestep]);
                 if (i == 0) {
                     // The function add_noise intializes x to
                     // Diffusers' latents * sigma (as in Diffusers'
@@ -1392,10 +1389,10 @@ static bool sample_k_diffusion(sample_method_t method,
                     }
                 }
                 // 2. compute alphas, betas
-                float alpha_prod_t = alphas_cumprod[timestep];
+                float alpha_prod_t = static_cast<float>(alphas_cumprod[timestep]);
                 // Note final_alpha_cumprod = alphas_cumprod[0] due to
                 // trailing timestep spacing
-                float alpha_prod_t_prev = prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0];
+                float alpha_prod_t_prev = static_cast<float>(prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0]);
                 float beta_prod_t       = 1 - alpha_prod_t;
                 // 3. compute predicted original sample from predicted
                 //    noise also called "predicted x_0" of formula (12)
@@ -1442,8 +1439,8 @@ static bool sample_k_diffusion(sample_method_t method,
                         // Two step inner loop without an explicit
                         // tensor
                         float pred_sample_direction =
-                            std::sqrt(1 - alpha_prod_t_prev -
-                                      std::pow(std_dev_t, 2)) *
+                            ::sqrtf(1 - alpha_prod_t_prev -
+                                    ::powf(std_dev_t, 2)) *
                             vec_model_output[j];
                         vec_x[j] = std::sqrt(alpha_prod_t_prev) *
                                        vec_pred_original_sample[j] +
@@ -1518,7 +1515,7 @@ static bool sample_k_diffusion(sample_method_t method,
                 // Begin k-diffusion specific workaround for
                 // evaluating F_theta(x; ...) from D(x, sigma), same
                 // as in DDIM (and see there for detailed comments)
-                float sigma = compvis_sigmas[timestep];
+                float sigma = static_cast<float>(compvis_sigmas[timestep]);
                 if (i == 0) {
                     float* vec_x = (float*)x->data;
                     for (int j = 0; j < ggml_nelements(x); j++) {
@@ -1557,14 +1554,14 @@ static bool sample_k_diffusion(sample_method_t method,
                 // is different from the notation alpha_t in
                 // DPM-Solver. In fact, we have alpha_{t_n} =
                 // \sqrt{\hat{alpha_n}}, [...]"
-                float alpha_prod_t = alphas_cumprod[timestep];
+                float alpha_prod_t = static_cast<float>(alphas_cumprod[timestep]);
                 float beta_prod_t  = 1 - alpha_prod_t;
                 // Note final_alpha_cumprod = alphas_cumprod[0] since
                 // TCD is always "trailing"
-                float alpha_prod_t_prev = prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0];
+                float alpha_prod_t_prev = static_cast<float>(prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0]);
                 // The subscript _s are the only portion in this
                 // section (2) unique to TCD
-                float alpha_prod_s = alphas_cumprod[timestep_s];
+                float alpha_prod_s = static_cast<float>(alphas_cumprod[timestep_s]);
                 float beta_prod_s  = 1 - alpha_prod_s;
                 // 3. Compute the predicted noised sample x_s based on
                 //    the model parameterization
diff --git a/examples/cli/avi_writer.h b/examples/cli/avi_writer.h
index 84b204af5..53b4749cf 100644
--- a/examples/cli/avi_writer.h
+++ b/examples/cli/avi_writer.h
@@ -172,9 +172,9 @@ int create_mjpg_avi_from_sd_images(const char* filename, sd_image_t* images, int
 
         // Write '00dc' chunk (video frame)
         fwrite("00dc", 4, 1, f);
-        write_u32_le(f, jpeg_data.size);
+        write_u32_le(f, (uint32_t)jpeg_data.size);
         index[i].offset = ftell(f) - 8;
-        index[i].size   = jpeg_data.size;
+        index[i].size   = (uint32_t)jpeg_data.size;
         fwrite(jpeg_data.buf, 1, jpeg_data.size, f);
 
         // Align to even byte size
diff --git a/examples/common/common.hpp b/examples/common/common.hpp
index 7ea95ed14..c9e8eeeb8 100644
--- a/examples/common/common.hpp
+++ b/examples/common/common.hpp
@@ -1386,10 +1386,10 @@ struct SDGenerationParams {
                 if (!item.empty()) {
                     try {
                         custom_sigmas.push_back(std::stof(item));
-                    } catch (const std::invalid_argument& e) {
+                    } catch (const std::invalid_argument&) {
                         LOG_ERROR("error: invalid float value '%s' in --sigmas", item.c_str());
                         return -1;
-                    } catch (const std::out_of_range& e) {
+                    } catch (const std::out_of_range&) {
                         LOG_ERROR("error: float value '%s' out of range in --sigmas", item.c_str());
                         return -1;
                     }
diff --git a/examples/server/main.cpp b/examples/server/main.cpp
index db9f5b1a1..9fa880496 100644
--- a/examples/server/main.cpp
+++ b/examples/server/main.cpp
@@ -44,7 +44,7 @@ inline bool is_base64(unsigned char c) {
 }
 
 std::vector<unsigned char> base64_decode(const std::string& encoded_string) {
-    int in_len = encoded_string.size();
+    int in_len = static_cast<int>(encoded_string.size());
     int i      = 0;
     int j      = 0;
     int in_    = 0;
@@ -617,7 +617,7 @@ int main(int argc, const char** argv) {
                 int img_h           = height;
                 uint8_t* raw_pixels = load_image_from_memory(
                     reinterpret_cast<const unsigned char*>(bytes.data()),
-                    bytes.size(),
+                    static_cast<int>(bytes.size()),
                     img_w, img_h,
                     width, height, 3);
@@ -635,7 +635,7 @@ int main(int argc, const char** argv) {
                 int mask_h        = height;
                 uint8_t* mask_raw = load_image_from_memory(
                     reinterpret_cast<const unsigned char*>(mask_bytes.data()),
-                    mask_bytes.size(),
+                    static_cast<int>(mask_bytes.size()),
                     mask_w, mask_h,
                     width, height, 1);
                 mask_image = {(uint32_t)mask_w, (uint32_t)mask_h, 1, mask_raw};
diff --git a/flux.hpp b/flux.hpp
index 86e2007ad..5d94fc85d 100644
--- a/flux.hpp
+++ b/flux.hpp
@@ -263,7 +263,7 @@ namespace Flux {
                        bool use_yak_mlp      = false,
                        bool use_mlp_silu_act = false)
             : idx(idx), prune_mod(prune_mod) {
-            int64_t mlp_hidden_dim = hidden_size * mlp_ratio;
+            int64_t mlp_hidden_dim = static_cast<int64_t>(hidden_size * mlp_ratio);
 
             if (!prune_mod && !share_modulation) {
                 blocks["img_mod"] = std::shared_ptr<GGMLBlock>(new Modulation(hidden_size, true));
@@ -442,7 +442,7 @@ namespace Flux {
             if (scale <= 0.f) {
                 scale = 1 / sqrt((float)head_dim);
             }
-            mlp_hidden_dim  = hidden_size * mlp_ratio;
+            mlp_hidden_dim  = static_cast<int64_t>(hidden_size * mlp_ratio);
             mlp_mult_factor = 1;
             if (use_yak_mlp || use_mlp_silu_act) {
                 mlp_mult_factor = 2;
@@ -744,38 +744,38 @@ namespace Flux {
 
     struct ChromaRadianceParams {
         int64_t nerf_hidden_size = 64;
-        int64_t nerf_mlp_ratio   = 4;
-        int64_t nerf_depth       = 4;
-        int64_t nerf_max_freqs   = 8;
+        int nerf_mlp_ratio       = 4;
+        int nerf_depth           = 4;
+        int nerf_max_freqs       = 8;
         bool use_x0              = false;
         bool use_patch_size_32   = false;
     };
 
     struct FluxParams {
-        SDVersion version           = VERSION_FLUX;
-        bool is_chroma              = false;
-        int64_t patch_size          = 2;
-        int64_t in_channels         = 64;
-        int64_t out_channels        = 64;
-        int64_t vec_in_dim          = 768;
-        int64_t context_in_dim      = 4096;
-        int64_t hidden_size         = 3072;
-        float mlp_ratio             = 4.0f;
-        int64_t num_heads           = 24;
-        int64_t depth               = 19;
-        int64_t depth_single_blocks = 38;
-        std::vector<int> axes_dim   = {16, 56, 56};
-        int64_t axes_dim_sum        = 128;
-        int theta                   = 10000;
-        bool qkv_bias               = true;
-        bool guidance_embed         = true;
-        int64_t in_dim              = 64;
-        bool disable_bias           = false;
-        bool share_modulation       = false;
-        bool semantic_txt_norm      = false;
-        bool use_yak_mlp            = false;
-        bool use_mlp_silu_act       = false;
-        float ref_index_scale       = 1.f;
+        SDVersion version         = VERSION_FLUX;
+        bool is_chroma            = false;
+        int patch_size            = 2;
+        int64_t in_channels       = 64;
+        int64_t out_channels      = 64;
+        int64_t vec_in_dim        = 768;
+        int64_t context_in_dim    = 4096;
+        int64_t hidden_size       = 3072;
+        float mlp_ratio           = 4.0f;
+        int num_heads             = 24;
+        int depth                 = 19;
+        int depth_single_blocks   = 38;
+        std::vector<int> axes_dim = {16, 56, 56};
+        int axes_dim_sum          = 128;
+        int theta                 = 10000;
+        bool qkv_bias             = true;
+        bool guidance_embed       = true;
+        int64_t in_dim            = 64;
+        bool disable_bias         = false;
+        bool share_modulation     = false;
+        bool semantic_txt_norm    = false;
+        bool use_yak_mlp          = false;
+        bool use_mlp_silu_act     = false;
+        float ref_index_scale     = 1.f;
         ChromaRadianceParams chroma_radiance_params;
     };
@@ -969,7 +969,7 @@ namespace Flux {
                 vec = approx->forward(ctx, vec);  // [344, N, hidden_size]
 
                 if (y != nullptr) {
-                    txt_img_mask = ggml_pad(ctx->ggml_ctx, y, img->ne[1], 0, 0, 0);
+                    txt_img_mask = ggml_pad(ctx->ggml_ctx, y, static_cast<int>(img->ne[1]), 0, 0, 0);
                 }
             } else {
                 auto time_in = std::dynamic_pointer_cast<MLPEmbedder>(blocks["time_in"]);
@@ -1072,12 +1072,12 @@ namespace Flux {
                                         std::vector<int> skip_layers = {}) {
             GGML_ASSERT(x->ne[3] == 1);
 
-            int64_t W          = x->ne[0];
-            int64_t H          = x->ne[1];
-            int64_t C          = x->ne[2];
-            int64_t patch_size = params.patch_size;
-            int pad_h          = (patch_size - H % patch_size) % patch_size;
-            int pad_w          = (patch_size - W % patch_size) % patch_size;
+            int64_t W      = x->ne[0];
+            int64_t H      = x->ne[1];
+            int64_t C      = x->ne[2];
+            int patch_size = params.patch_size;
+            int pad_h      = (patch_size - H % patch_size) % patch_size;
+            int pad_w      = (patch_size - W % patch_size) % patch_size;
 
             auto img      = pad_to_patch_size(ctx, x);
             auto orig_img = img;
@@ -1146,15 +1146,15 @@ namespace Flux {
                                     std::vector<int> skip_layers = {}) {
             GGML_ASSERT(x->ne[3] == 1);
 
-            int64_t W          = x->ne[0];
-            int64_t H          = x->ne[1];
-            int64_t C          = x->ne[2];
-            int64_t patch_size = params.patch_size;
-            int pad_h          = (patch_size - H % patch_size) % patch_size;
-            int pad_w          = (patch_size - W % patch_size) % patch_size;
+            int64_t W      = x->ne[0];
+            int64_t H      = x->ne[1];
+            int64_t C      = x->ne[2];
+            int patch_size = params.patch_size;
+            int pad_h      = (patch_size - H % patch_size) % patch_size;
+            int pad_w      = (patch_size - W % patch_size) % patch_size;
 
-            auto img            = process_img(ctx, x);
-            uint64_t img_tokens = img->ne[1];
+            auto img           = process_img(ctx, x);
+            int64_t img_tokens = img->ne[1];
 
             if (params.version == VERSION_FLUX_FILL) {
                 GGML_ASSERT(c_concat != nullptr);
@@ -1465,11 +1465,11 @@ namespace Flux {
                 txt_arange_dims = {1, 2};
             }
 
-            pe_vec = Rope::gen_flux_pe(x->ne[1],
-                                       x->ne[0],
+            pe_vec = Rope::gen_flux_pe(static_cast<int>(x->ne[1]),
+                                       static_cast<int>(x->ne[0]),
                                        flux_params.patch_size,
-                                       x->ne[3],
-                                       context->ne[1],
+                                       static_cast<int>(x->ne[3]),
+                                       static_cast<int>(context->ne[1]),
                                        txt_arange_dims,
                                        ref_latents,
                                        increase_ref_index,
@@ -1478,7 +1478,7 @@ namespace Flux {
                                        circular_y_enabled,
                                        circular_x_enabled,
                                        flux_params.axes_dim);
-            int pos_len = pe_vec.size() / flux_params.axes_dim_sum / 2;
+            int pos_len = static_cast<int>(pe_vec.size() / flux_params.axes_dim_sum / 2);
             // LOG_DEBUG("pos_len %d", pos_len);
             auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, flux_params.axes_dim_sum / 2, pos_len);
             // pe->data = pe_vec.data();
@@ -1487,10 +1487,10 @@ namespace Flux {
             set_backend_tensor_data(pe, pe_vec.data());
 
             if (version == VERSION_CHROMA_RADIANCE) {
-                int64_t patch_size     = flux_params.patch_size;
-                int64_t nerf_max_freqs = flux_params.chroma_radiance_params.nerf_max_freqs;
-                dct_vec                = fetch_dct_pos(patch_size, nerf_max_freqs);
-                dct                    = ggml_new_tensor_2d(compute_ctx, GGML_TYPE_F32, nerf_max_freqs * nerf_max_freqs, patch_size * patch_size);
+                int patch_size     = flux_params.patch_size;
+                int nerf_max_freqs = flux_params.chroma_radiance_params.nerf_max_freqs;
+                dct_vec            = fetch_dct_pos(patch_size, nerf_max_freqs);
+                dct                = ggml_new_tensor_2d(compute_ctx, GGML_TYPE_F32, nerf_max_freqs * nerf_max_freqs, patch_size * patch_size);
                 // dct->data = dct_vec.data();
                 // print_ggml_tensor(dct);
                 // dct->data = nullptr;
@@ -1577,12 +1577,12 @@ namespace Flux {
 
             struct ggml_tensor* out = nullptr;
 
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("flux test done in %dms", t1 - t0);
+            LOG_DEBUG("flux test done in %lldms", t1 - t0);
         }
     }
diff --git a/ggml_extend.hpp b/ggml_extend.hpp
index 76889a8b2..1ff450116 100644
--- a/ggml_extend.hpp
+++ b/ggml_extend.hpp
@@ -98,10 +98,10 @@ static_assert(GGML_MAX_NAME >= 128, "GGML_MAX_NAME must be at least 128");
 __STATIC_INLINE__ struct ggml_tensor* ggml_ext_mul_n_mode(struct ggml_context* ctx, struct ggml_tensor* a, struct ggml_tensor* b, int mode = 0) {
     // reshape A
     // swap 0th and nth axis
-    a       = ggml_cont(ctx, ggml_permute(ctx, a, mode, mode != 1 ? 1 : 0, mode != 2 ? 2 : 0, mode != 3 ? 3 : 0));
-    int ne1 = a->ne[1];
-    int ne2 = a->ne[2];
-    int ne3 = a->ne[3];
+    a           = ggml_cont(ctx, ggml_permute(ctx, a, mode, mode != 1 ? 1 : 0, mode != 2 ? 2 : 0, mode != 3 ? 3 : 0));
+    int64_t ne1 = a->ne[1];
+    int64_t ne2 = a->ne[2];
+    int64_t ne3 = a->ne[3];
     // make 2D
     a = ggml_cont(ctx, ggml_reshape_2d(ctx, a, a->ne[0], (ne3 * ne2 * ne1)));
@@ -167,12 +167,12 @@ __STATIC_INLINE__ void ggml_ext_im_set_randn_f32(struct ggml_tensor* tensor, std
     }
 }
 
-__STATIC_INLINE__ void ggml_ext_tensor_set_f32(struct ggml_tensor* tensor, float value, int i0, int i1 = 0, int i2 = 0, int i3 = 0) {
+__STATIC_INLINE__ void ggml_ext_tensor_set_f32(struct ggml_tensor* tensor, float value, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
     GGML_ASSERT(tensor->nb[0] == sizeof(float));
     *(float*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]) = value;
 }
 
-__STATIC_INLINE__ float ggml_ext_tensor_get_f32(const ggml_tensor* tensor, int i0, int i1 = 0, int i2 = 0, int i3 = 0) {
+__STATIC_INLINE__ float ggml_ext_tensor_get_f32(const ggml_tensor* tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
     if (tensor->buffer != nullptr) {
         float value;
         ggml_backend_tensor_get(tensor, &value, i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0], sizeof(float));
@@ -182,9 +182,9 @@ __STATIC_INLINE__ float ggml_ext_tensor_get_f32(const ggml_tensor* tensor, int i
     return *(float*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]);
 }
 
-__STATIC_INLINE__ int ggml_ext_tensor_get_i32(const ggml_tensor* tensor, int i0, int i1 = 0, int i2 = 0, int i3 = 0) {
+__STATIC_INLINE__ int ggml_ext_tensor_get_i32(const ggml_tensor* tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
     if (tensor->buffer != nullptr) {
-        float value;
+        int value;
         ggml_backend_tensor_get(tensor, &value, i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0], sizeof(int));
         return value;
     }
@@ -192,12 +192,12 @@ __STATIC_INLINE__ int ggml_ext_tensor_get_i32(const ggml_tensor* tensor, int i0,
     return *(int*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]);
 }
 
-__STATIC_INLINE__ ggml_fp16_t ggml_ext_tensor_get_f16(const ggml_tensor* tensor, int i0, int i1 = 0, int i2 = 0, int i3 = 0) {
+__STATIC_INLINE__ ggml_fp16_t ggml_ext_tensor_get_f16(const ggml_tensor* tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
     GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
     return *(ggml_fp16_t*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]);
 }
 
-__STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int iw, int ih, int ic, bool scale = true) {
+__STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int64_t iw, int64_t ih, int64_t ic, bool scale = true) {
     float value = *(image.data + ih * image.width * image.channel + iw * image.channel + ic);
     if (scale) {
         value /= 255.f;
@@ -205,7 +205,7 @@ __STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int iw, int ih, int i
     return value;
 }
 
-__STATIC_INLINE__ float sd_image_get_f32(sd_image_f32_t image, int iw, int ih, int ic, bool scale = true) {
+__STATIC_INLINE__ float sd_image_get_f32(sd_image_f32_t image, int64_t iw, int64_t ih, int64_t ic, bool scale = true) {
     float value = *(image.data + ih * image.width * image.channel + iw * image.channel + ic);
     if (scale) {
         value /= 255.f;
@@ -450,8 +450,8 @@ __STATIC_INLINE__ void ggml_ext_tensor_apply_mask(struct ggml_tensor* image_data
     int64_t width    = output->ne[0];
     int64_t height   = output->ne[1];
     int64_t channels = output->ne[2];
-    float rescale_mx = mask->ne[0] / output->ne[0];
-    float rescale_my = mask->ne[1] / output->ne[1];
+    float rescale_mx = 1.f * mask->ne[0] / output->ne[0];
+    float rescale_my = 1.f * mask->ne[1] / output->ne[1];
     GGML_ASSERT(output->type == GGML_TYPE_F32);
     for (int ix = 0; ix < width; ix++) {
         for (int iy = 0; iy < height; iy++) {
@@ -685,7 +685,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_torch_permute(struct ggml_context
 
 __STATIC_INLINE__ struct ggml_tensor* ggml_ext_slice(struct ggml_context* ctx,
                                                      struct ggml_tensor* x,
-                                                     int64_t dim,
+                                                     int dim,
                                                      int64_t start,
                                                      int64_t end) {
     GGML_ASSERT(dim >= 0 && dim < 4);
@@ -785,7 +785,7 @@ __STATIC_INLINE__ void sd_tiling_calc_tiles(int& num_tiles_dim,
                                             int small_dim,
                                             int tile_size,
                                             const float tile_overlap_factor) {
-    int tile_overlap     = (tile_size * tile_overlap_factor);
+    int tile_overlap     = static_cast<int>(tile_size * tile_overlap_factor);
     int non_tile_overlap = tile_size - tile_overlap;
 
     num_tiles_dim = (small_dim - tile_overlap) / non_tile_overlap;
@@ -1346,7 +1346,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context
     // LOG_DEBUG("attention_ext L_q:%d L_k:%d n_head:%d C:%d d_head:%d N:%d", L_q, L_k, n_head, C, d_head, N);
     bool can_use_flash_attn = true;
     if (can_use_flash_attn && L_k % 256 != 0) {
-        kv_pad = GGML_PAD(L_k, 256) - L_k;
+        kv_pad = GGML_PAD(L_k, 256) - static_cast<int>(L_k);
     }
 
     if (mask != nullptr) {
@@ -2361,53 +2361,6 @@ class Conv2d : public UnaryBlock {
     }
 };
 
-class Conv3dnx1x1 : public UnaryBlock {
-protected:
-    int64_t in_channels;
-    int64_t out_channels;
-    int64_t kernel_size;
-    int64_t stride;
-    int64_t padding;
-    int64_t dilation;
-    bool bias;
-
-    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map, const std::string prefix = "") override {
-        enum ggml_type wtype = GGML_TYPE_F16;
-        params["weight"]     = ggml_new_tensor_4d(ctx, wtype, 1, kernel_size, in_channels, out_channels);  // 5d => 4d
-        if (bias) {
-            enum ggml_type wtype = GGML_TYPE_F32;
-            params["bias"]       = ggml_new_tensor_1d(ctx, wtype, out_channels);
-        }
-    }
-
-public:
-    Conv3dnx1x1(int64_t in_channels,
-                int64_t out_channels,
-                int64_t kernel_size,
-                int64_t stride   = 1,
-                int64_t padding  = 0,
-                int64_t dilation = 1,
-                bool bias        = true)
-        : in_channels(in_channels),
-          out_channels(out_channels),
-          kernel_size(kernel_size),
-          stride(stride),
-          padding(padding),
-          dilation(dilation),
-          bias(bias) {}
-
-    // x: [N, IC, ID, IH*IW]
-    // result: [N, OC, OD, OH*OW]
-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
-        struct ggml_tensor* w = params["weight"];
-        struct ggml_tensor* b = nullptr;
-        if (bias) {
-            b = params["bias"];
-        }
-        return ggml_ext_conv_3d_nx1x1(ctx->ggml_ctx, x, w, b, stride, padding, dilation);
-    }
-};
-
 class Conv3d : public UnaryBlock {
 protected:
     int64_t in_channels;
@@ -2523,7 +2476,7 @@ class LayerNorm : public UnaryBlock {
 
 class GroupNorm : public GGMLBlock {
 protected:
-    int64_t num_groups;
+    int num_groups;
     int64_t num_channels;
     float eps;
     bool affine;
@@ -2540,7 +2493,7 @@ class GroupNorm : public GGMLBlock {
     }
 
 public:
-    GroupNorm(int64_t num_groups,
+    GroupNorm(int num_groups,
               int64_t num_channels,
               float eps   = 1e-05f,
               bool affine = true)
diff --git a/gguf_reader.hpp b/gguf_reader.hpp
index 53482662e..2cc4d9d9c 100644
--- a/gguf_reader.hpp
+++ b/gguf_reader.hpp
@@ -151,7 +151,7 @@ class GGUFReader {
             }
 
             if (n_dims > GGML_MAX_DIMS) {
-                for (int i = GGML_MAX_DIMS; i < n_dims; i++) {
+                for (uint32_t i = GGML_MAX_DIMS; i < n_dims; i++) {
                     info.shape[GGML_MAX_DIMS - 1] *= info.shape[i];  // stack to last dim;
                 }
                 info.shape.resize(GGML_MAX_DIMS);
diff --git a/latent-preview.h b/latent-preview.h
index 2c54c3b5e..76e17415c 100644
--- a/latent-preview.h
+++ b/latent-preview.h
@@ -166,12 +166,12 @@ float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f};
 
 void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
     size_t buffer_head     = 0;
-    uint32_t latent_width  = latents->ne[0];
-    uint32_t latent_height = latents->ne[1];
-    uint32_t dim           = latents->ne[ggml_n_dims(latents) - 1];
+    uint32_t latent_width  = static_cast<uint32_t>(latents->ne[0]);
+    uint32_t latent_height = static_cast<uint32_t>(latents->ne[1]);
+    uint32_t dim           = static_cast<uint32_t>(latents->ne[ggml_n_dims(latents) - 1]);
     uint32_t frames        = 1;
     if (ggml_n_dims(latents) == 4) {
-        frames = latents->ne[2];
+        frames = static_cast<uint32_t>(latents->ne[2]);
     }
 
     uint32_t rgb_width = latent_width * patch_size;
@@ -179,9 +179,9 @@ void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const fl
 
     uint32_t unpatched_dim = dim / (patch_size * patch_size);
 
-    for (int k = 0; k < frames; k++) {
-        for (int rgb_x = 0; rgb_x < rgb_width; rgb_x++) {
-            for (int rgb_y = 0; rgb_y < rgb_height; rgb_y++) {
+    for (uint32_t k = 0; k < frames; k++) {
+        for (uint32_t rgb_x = 0; rgb_x < rgb_width; rgb_x++) {
+            for (uint32_t rgb_y = 0; rgb_y < rgb_height; rgb_y++) {
                 int latent_x = rgb_x / patch_size;
                 int latent_y = rgb_y / patch_size;
@@ -197,7 +197,7 @@ void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const fl
 
                 float r = 0, g = 0, b = 0;
                 if (latent_rgb_proj != nullptr) {
-                    for (int d = 0; d < unpatched_dim; d++) {
+                    for (uint32_t d = 0; d < unpatched_dim; d++) {
                         float value = *(float*)((char*)latents->data + latent_id + (d * patch_size * patch_size + channel_offset) * latents->nb[ggml_n_dims(latents) - 1]);
 
                         r += value * latent_rgb_proj[d][0];
                         g += value * latent_rgb_proj[d][1];
diff --git a/llm.hpp b/llm.hpp
index dc04c84cc..67b1ea165 100644
--- a/llm.hpp
+++ b/llm.hpp
@@ -195,14 +195,14 @@ namespace LLM {
             tokens.insert(tokens.begin(), BOS_TOKEN_ID);
         }
         if (max_length > 0 && padding) {
-            size_t n = std::ceil(tokens.size() * 1.0 / max_length);
+            size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.f / max_length));
             if (n == 0) {
                 n = 1;
             }
             size_t length = max_length * n;
             LOG_DEBUG("token length: %llu", length);
             tokens.insert(tokens.end(), length - tokens.size(), PAD_TOKEN_ID);
-            weights.insert(weights.end(), length - weights.size(), 1.0);
+            weights.insert(weights.end(), length - weights.size(), 1.f);
         }
     }
@@ -377,7 +377,7 @@ namespace LLM {
 
             try {
                 vocab = nlohmann::json::parse(vocab_utf8_str);
-            } catch (const nlohmann::json::parse_error& e) {
+            } catch (const nlohmann::json::parse_error&) {
                 GGML_ABORT("invalid vocab json str");
             }
             for (const auto& [key, value] : vocab.items()) {
@@ -386,7 +386,7 @@ namespace LLM {
                 encoder[token] = i;
                 decoder[i]     = token;
             }
-            encoder_len = vocab.size();
+            encoder_len = static_cast<int>(vocab.size());
             LOG_DEBUG("vocab size: %d", encoder_len);
 
             auto byte_unicode_pairs = bytes_to_unicode();
@@ -485,16 +485,16 @@ namespace LLM {
     };
 
     struct LLMVisionParams {
-        int64_t num_layers          = 32;
+        int num_layers              = 32;
         int64_t hidden_size         = 1280;
         int64_t intermediate_size   = 3420;
-        int64_t num_heads           = 16;
+        int num_heads               = 16;
         int64_t in_channels         = 3;
         int64_t out_hidden_size     = 3584;
-        int64_t temporal_patch_size = 2;
-        int64_t patch_size          = 14;
-        int64_t spatial_merge_size  = 2;
-        int64_t window_size         = 112;
+        int temporal_patch_size     = 2;
+        int patch_size              = 14;
+        int spatial_merge_size      = 2;
+        int window_size             = 112;
         std::set<int> fullatt_block_indexes = {7, 15, 23, 31};
     };
@@ -503,9 +503,9 @@ namespace LLM {
         int64_t num_layers        = 28;
         int64_t hidden_size       = 3584;
         int64_t intermediate_size = 18944;
-        int64_t num_heads         = 28;
-        int64_t num_kv_heads      = 4;
-        int64_t head_dim          = 128;
+        int num_heads             = 28;
+        int num_kv_heads          = 4;
+        int head_dim              = 128;
         bool qkv_bias             = true;
         bool qk_norm              = false;
         int64_t vocab_size        = 152064;
@@ -647,15 +647,15 @@ namespace LLM {
     struct VisionAttention : public GGMLBlock {
     protected:
         bool llama_cpp_style;
-        int64_t head_dim;
-        int64_t num_heads;
+        int head_dim;
+        int num_heads;
 
     public:
         VisionAttention(bool llama_cpp_style,
                         int64_t hidden_size,
-                        int64_t num_heads)
+                        int num_heads)
             : llama_cpp_style(llama_cpp_style), num_heads(num_heads) {
-            head_dim = hidden_size / num_heads;
+            head_dim = static_cast<int>(hidden_size / num_heads);
             GGML_ASSERT(num_heads * head_dim == hidden_size);
             if (llama_cpp_style) {
                 blocks["q_proj"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size));
@@ -709,7 +709,7 @@ namespace LLM {
         VisionBlock(bool llama_cpp_style,
                     int64_t hidden_size,
                     int64_t intermediate_size,
-                    int64_t num_heads,
+                    int num_heads,
                     float eps = 1e-6f) {
             blocks["attn"] = std::shared_ptr<GGMLBlock>(new VisionAttention(llama_cpp_style, hidden_size, num_heads));
             blocks["mlp"]  = std::shared_ptr<GGMLBlock>(new MLP(hidden_size, intermediate_size, true));
@@ -743,22 +743,22 @@ namespace LLM {
 
     struct VisionModel : public GGMLBlock {
     protected:
-        int64_t num_layers;
-        int64_t spatial_merge_size;
+        int num_layers;
+        int spatial_merge_size;
         std::set<int> fullatt_block_indexes;
 
     public:
         VisionModel(bool llama_cpp_style,
-                    int64_t num_layers,
+                    int num_layers,
                     int64_t in_channels,
                     int64_t hidden_size,
                     int64_t out_hidden_size,
                     int64_t intermediate_size,
-                    int64_t num_heads,
-                    int64_t spatial_merge_size,
-                    int64_t patch_size,
-                    int64_t temporal_patch_size,
-                    int64_t window_size,
+                    int num_heads,
+                    int spatial_merge_size,
+                    int patch_size,
+                    int temporal_patch_size,
+                    int window_size,
                     std::set<int> fullatt_block_indexes = {7, 15, 23, 31},
                     float eps                           = 1e-6f)
             : num_layers(num_layers), fullatt_block_indexes(std::move(fullatt_block_indexes)), spatial_merge_size(spatial_merge_size) {
@@ -817,7 +817,7 @@ namespace LLM {
     struct Attention : public GGMLBlock {
     protected:
         LLMArch arch;
-        int64_t head_dim;
+        int head_dim;
         int64_t num_heads;
         int64_t num_kv_heads;
         bool qk_norm;
@@ -1227,11 +1227,11 @@ namespace LLM {
         }
 
         int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) {
-            int grid_t     = 1;
-            int grid_h     = h / params.vision.patch_size;
-            int grid_w     = w / params.vision.patch_size;
-            int llm_grid_h = grid_h / params.vision.spatial_merge_size;
-            int llm_grid_w = grid_w / params.vision.spatial_merge_size;
+            int64_t grid_t     = 1;
+            int64_t grid_h     = h / params.vision.patch_size;
+            int64_t grid_w     = w / params.vision.patch_size;
+            int64_t llm_grid_h = grid_h / params.vision.spatial_merge_size;
+            int64_t llm_grid_w = grid_w / params.vision.spatial_merge_size;
             return grid_t * grid_h * grid_w;
         }
@@ -1269,8 +1269,8 @@ namespace LLM {
             GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
 
             int grid_t     = 1;
-            int grid_h     = image->ne[1] / params.vision.patch_size;
-            int grid_w     = image->ne[0] / params.vision.patch_size;
+            int grid_h     = static_cast<int>(image->ne[1]) / params.vision.patch_size;
+            int grid_w     = static_cast<int>(image->ne[0]) / params.vision.patch_size;
             int llm_grid_h = grid_h / params.vision.spatial_merge_size;
             int llm_grid_w = grid_w / params.vision.spatial_merge_size;
             int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size;
@@ -1358,14 +1358,14 @@ namespace LLM {
             set_backend_tensor_data(window_mask, window_mask_vec.data());
 
             // pe
-            int head_dim = params.vision.hidden_size / params.vision.num_heads;
+            int head_dim = static_cast<int>(params.vision.hidden_size / params.vision.num_heads);
             pe_vec       = Rope::gen_qwen2vl_pe(grid_h,
                                                 grid_w,
                                                 params.vision.spatial_merge_size,
                                                 window_inverse_index_vec,
-                                                10000.f,
+                                                10000,
                                                 {head_dim / 2, head_dim / 2});
-            int pos_len = pe_vec.size() / head_dim / 2;
+            int pos_len = static_cast<int>(pe_vec.size() / head_dim / 2);
             // LOG_DEBUG("pos_len %d", pos_len);
             auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, head_dim / 2, pos_len);
             // pe->data = pe_vec.data();
@@ -1485,13 +1485,13 @@ namespace LLM {
                 print_ggml_tensor(image, false, "image");
 
                 struct ggml_tensor* out = nullptr;
-                int t0                  = ggml_time_ms();
+                int64_t t0              = ggml_time_ms();
                 model.encode_image(8, image, &out, work_ctx);
-                int t1 = ggml_time_ms();
+                int64_t t1 = ggml_time_ms();
                 print_ggml_tensor(out, false, "image_embed");
                 image_embed = out;
 
-                LOG_DEBUG("llm encode_image test done in %dms", t1 - t0);
+                LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0);
             }
 
             std::string placeholder = "<|image_pad|>";
@@ -1524,12 +1524,12 @@ namespace LLM {
             auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
 
             struct ggml_tensor* out = nullptr;
-            int t0                  = ggml_time_ms();
+            int64_t t0              = ggml_time_ms();
             model.compute(8, input_ids, image_embeds, {}, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
 
-            LOG_DEBUG("llm test done in %dms", t1 - t0);
+            LOG_DEBUG("llm test done in %lldms", t1 - t0);
         } else if (test_vit) {
             // auto image = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 280, 280, 3);
             // ggml_set_f32(image, 0.f);
@@ -1537,16 +1537,16 @@ namespace LLM {
             print_ggml_tensor(image, false, "image");
 
             struct ggml_tensor* out = nullptr;
-            int t0                  = ggml_time_ms();
+            int64_t t0              = ggml_time_ms();
             model.encode_image(8, image, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out, false, "out");
 
             // auto ref_out = load_tensor_from_file(work_ctx, "qwen2vl.bin");
             // ggml_ext_tensor_diff(ref_out, out, 0.01f);
 
-            LOG_DEBUG("llm test done in %dms", t1 - t0);
+            LOG_DEBUG("llm test done in %lldms", t1 - t0);
         } else if (test_mistral) {
             std::pair<int, int> prompt_attn_range;
             std::string text = "[SYSTEM_PROMPT]You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object\nattribution and actions without speculation.[/SYSTEM_PROMPT][INST]";
@@ -1564,12 +1564,12 @@ namespace LLM {
             auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
 
             struct ggml_tensor* out = nullptr;
-            int t0                  = ggml_time_ms();
+            int64_t t0              = ggml_time_ms();
             model.compute(8, input_ids, {}, {10, 20, 30}, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
 
-            LOG_DEBUG("llm test done in %dms", t1 - t0);
+            LOG_DEBUG("llm test done in %lldms", t1 - t0);
         } else if (test_qwen3) {
             std::pair<int, int> prompt_attn_range;
             std::string text = "<|im_start|>user\n";
@@ -1587,12 +1587,12 @@ namespace LLM {
             auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
 
             struct ggml_tensor* out = nullptr;
-            int t0                  = ggml_time_ms();
+            int64_t t0              = ggml_time_ms();
             model.compute(8, input_ids, {}, {35}, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
 
-            LOG_DEBUG("llm test done in %dms", t1 - t0);
+            LOG_DEBUG("llm test done in %lldms", t1 - t0);
         } else {
             std::pair<int, int> prompt_attn_range;
             std::string text = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n";
@@ -1610,12 +1610,12 @@ namespace LLM {
             auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
 
             struct ggml_tensor* out = nullptr;
-            int t0                  = ggml_time_ms();
+            int64_t t0              = ggml_time_ms();
             model.compute(8, input_ids, {}, {}, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
 
-            LOG_DEBUG("llm test done in %dms", t1 - t0);
+            LOG_DEBUG("llm test done in %lldms", t1 - t0);
         }
     }
diff --git a/mmdit.hpp b/mmdit.hpp
index eeb74a268..cb4be7bf0 100644
--- a/mmdit.hpp
+++ b/mmdit.hpp
@@ -97,12 +97,12 @@ struct PatchEmbed : public GGMLBlock {
 struct TimestepEmbedder : public GGMLBlock {
     // Embeds scalar timesteps into vector representations.
protected:
-    int64_t frequency_embedding_size;
+    int frequency_embedding_size;
 
public:
     TimestepEmbedder(int64_t hidden_size,
-                     int64_t frequency_embedding_size = 256,
-                     int64_t out_channels             = 0)
+                     int frequency_embedding_size = 256,
+                     int64_t out_channels         = 0)
         : frequency_embedding_size(frequency_embedding_size) {
         if (out_channels <= 0) {
             out_channels = hidden_size;
@@ -167,11 +167,11 @@ class SelfAttention : public GGMLBlock {
             blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim));
         }
         if (qk_norm == "rms") {
-            blocks["ln_q"] = std::shared_ptr<GGMLBlock>(new RMSNorm(d_head, 1.0e-6));
-            blocks["ln_k"] = std::shared_ptr<GGMLBlock>(new RMSNorm(d_head, 1.0e-6));
+            blocks["ln_q"] = std::shared_ptr<GGMLBlock>(new RMSNorm(d_head, 1.0e-6f));
+            blocks["ln_k"] = std::shared_ptr<GGMLBlock>(new RMSNorm(d_head, 1.0e-6f));
         } else if (qk_norm == "ln") {
-            blocks["ln_q"] = std::shared_ptr<GGMLBlock>(new LayerNorm(d_head, 1.0e-6));
-            blocks["ln_k"] = std::shared_ptr<GGMLBlock>(new LayerNorm(d_head, 1.0e-6));
+            blocks["ln_q"] = std::shared_ptr<GGMLBlock>(new LayerNorm(d_head, 1.0e-6f));
+            blocks["ln_k"] = std::shared_ptr<GGMLBlock>(new LayerNorm(d_head, 1.0e-6f));
         }
     }
@@ -623,7 +623,7 @@ struct MMDiT : public GGMLBlock {
     // Diffusion model with a Transformer backbone.
protected:
     int64_t input_size = -1;
-    int64_t patch_size = 2;
+    int patch_size     = 2;
     int64_t in_channels = 16;
     int64_t d_self      = -1;  // >=0 for MMdiT-X
     int64_t depth       = 24;
@@ -943,12 +943,12 @@ struct MMDiTRunner : public GGMLRunner {
 
         struct ggml_tensor* out = nullptr;
 
-        int t0 = ggml_time_ms();
+        int64_t t0 = ggml_time_ms();
         compute(8, x, timesteps, context, y, &out, work_ctx);
-        int t1 = ggml_time_ms();
+        int64_t t1 = ggml_time_ms();
         print_ggml_tensor(out);
-        LOG_DEBUG("mmdit test done in %dms", t1 - t0);
+        LOG_DEBUG("mmdit test done in %lldms", t1 - t0);
     }
 }
diff --git a/model.cpp b/model.cpp
index dcb77e816..a19f180da 100644
--- a/model.cpp
+++ b/model.cpp
@@ -436,7 +436,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
                                      name,
                                      gguf_tensor_info.type,
                                      gguf_tensor_info.shape.data(),
-                                     gguf_tensor_info.shape.size(),
+                                     static_cast<int>(gguf_tensor_info.shape.size()),
                                      file_index,
                                      data_offset + gguf_tensor_info.offset);
@@ -448,7 +448,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
         return true;
     }
 
-    int n_tensors = gguf_get_n_tensors(ctx_gguf_);
+    int n_tensors = static_cast<int>(gguf_get_n_tensors(ctx_gguf_));
 
     size_t total_size  = 0;
     size_t data_offset = gguf_get_data_offset(ctx_gguf_);
@@ -1570,7 +1570,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
                     break;
                 }
                 size_t curr_num = total_tensors_processed + current_idx;
-                pretty_progress(curr_num, total_tensors_to_process, (ggml_time_ms() - t_start) / 1000.0f / (curr_num + 1e-6f));
+                pretty_progress(static_cast<int>(curr_num), static_cast<int>(total_tensors_to_process), (ggml_time_ms() - t_start) / 1000.0f / (curr_num + 1e-6f));
                 std::this_thread::sleep_for(std::chrono::milliseconds(200));
             }
@@ -1583,7 +1583,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
             break;
         }
         total_tensors_processed += file_tensors.size();
-        pretty_progress(total_tensors_processed, total_tensors_to_process, (ggml_time_ms() - t_start) / 1000.0f / (total_tensors_processed + 1e-6f));
+        pretty_progress(static_cast<int>(total_tensors_processed), static_cast<int>(total_tensors_to_process), (ggml_time_ms() - t_start) / 1000.0f / (total_tensors_processed + 1e-6f));
         if (total_tensors_processed < total_tensors_to_process) {
             printf("\n");
         }
diff --git a/pmid.hpp b/pmid.hpp
index d69423ad3..5b70dab66 100644
--- a/pmid.hpp
+++ b/pmid.hpp
@@ -72,7 +72,7 @@ struct PerceiverAttention : public GGMLBlock {
     int heads;     // = heads
public:
     PerceiverAttention(int dim, int dim_h = 64, int h = 8)
-        : scale(powf(dim_h, -0.5)), dim_head(dim_h), heads(h) {
+        : scale(powf(static_cast<float>(dim_h), -0.5f)), dim_head(dim_h), heads(h) {
         int inner_dim   = dim_head * heads;
         blocks["norm1"] = std::shared_ptr<GGMLBlock>(new LayerNorm(dim));
         blocks["norm2"] = std::shared_ptr<GGMLBlock>(new LayerNorm(dim));
diff --git a/preprocessing.hpp b/preprocessing.hpp
index 4a1b85144..84e0ed3f8 100644
--- a/preprocessing.hpp
+++ b/preprocessing.hpp
@@ -2,7 +2,7 @@
 #define __PREPROCESSING_HPP__
 #include "ggml_extend.hpp"
 
-#define M_PI_ 3.14159265358979323846
+#define M_PI_ 3.14159265358979323846f
 
 void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml_tensor* kernel, int padding) {
     struct ggml_init_params params;
@@ -20,13 +20,13 @@ void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml
 }
 
 void gaussian_kernel(struct ggml_tensor* kernel) {
-    int ks_mid   = kernel->ne[0] / 2;
+    int ks_mid   = static_cast<int>(kernel->ne[0] / 2);
     float sigma  = 1.4f;
     float normal = 1.f / (2.0f * M_PI_ * powf(sigma, 2.0f));
     for (int y = 0; y < kernel->ne[0]; y++) {
-        float gx = -ks_mid + y;
+        float gx = static_cast<float>(-ks_mid + y);
         for (int x = 0; x < kernel->ne[1]; x++) {
-            float gy = -ks_mid + x;
+            float gy = static_cast<float>(-ks_mid + x);
             float k_ = expf(-((gx * gx + gy * gy) / (2.0f * powf(sigma, 2.0f)))) * normal;
             ggml_ext_tensor_set_f32(kernel, k_, x, y);
         }
@@ -46,7 +46,7 @@ void grayscale(struct ggml_tensor* rgb_img, struct ggml_tensor* grayscale) {
 }
 
 void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
-    int n_elements = ggml_nelements(h);
+    int n_elements = static_cast<int>(ggml_nelements(h));
     float* dx      = (float*)x->data;
     float* dy      = (float*)y->data;
     float* dh      = (float*)h->data;
@@ -56,7 +56,7 @@ void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor
 }
 
 void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
-    int n_elements = ggml_nelements(h);
+    int n_elements = static_cast<int>(ggml_nelements(h));
     float* dx      = (float*)x->data;
     float* dy      = (float*)y->data;
     float* dh      = (float*)h->data;
@@ -66,7 +66,7 @@ void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tens
 }
 
 void normalize_tensor(struct ggml_tensor* g) {
-    int n_elements = ggml_nelements(g);
+    int n_elements = static_cast<int>(ggml_nelements(g));
     float* dg      = (float*)g->data;
     float max      = -INFINITY;
     for (int i = 0; i < n_elements; i++) {
@@ -118,7 +118,7 @@ void non_max_supression(struct ggml_tensor* result, struct ggml_tensor* G, struc
 }
 
 void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) {
-    int n_elements = ggml_nelements(img);
+    int n_elements = static_cast<int>(ggml_nelements(img));
     float* imd     = (float*)img->data;
     float max      = -INFINITY;
     for (int i = 0; i < n_elements; i++) {
@@ -209,8 +209,8 @@ bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold,
     non_max_supression(image_gray, G, tetha);
     threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong);
     // to RGB channels
-    for (int iy = 0; iy < img.height; iy++) {
-        for (int ix = 0; ix < img.width; ix++) {
+    for (uint32_t iy = 0; iy < img.height; iy++) {
+        for (uint32_t ix = 0; ix < img.width; ix++) {
             float gray = ggml_ext_tensor_get_f32(image_gray, ix, iy);
             gray       = inverse ? 1.0f - gray : gray;
             ggml_ext_tensor_set_f32(image, gray, ix, iy);
diff --git a/qwen_image.hpp b/qwen_image.hpp
index 9c095a9a0..ec2231b01 100644
--- a/qwen_image.hpp
+++ b/qwen_image.hpp
@@ -350,16 +350,16 @@ namespace Qwen {
     };
 
     struct QwenImageParams {
-        int64_t patch_size          = 2;
+        int patch_size              = 2;
         int64_t in_channels         = 64;
         int64_t out_channels        = 16;
-        int64_t num_layers          = 60;
+        int num_layers              = 60;
         int64_t attention_head_dim  = 128;
         int64_t num_attention_heads = 24;
         int64_t joint_attention_dim = 3584;
-        float theta                 = 10000;
+        int theta                   = 10000;
         std::vector<int> axes_dim   = {16, 56, 56};
-        int64_t axes_dim_sum        = 128;
+        int axes_dim_sum            = 128;
         bool zero_cond_t            = false;
     };
@@ -513,8 +513,8 @@ namespace Qwen {
             int64_t C = x->ne[2];
             int64_t N = x->ne[3];
 
-            auto img            = process_img(ctx, x);
-            uint64_t img_tokens = img->ne[1];
+            auto img           = process_img(ctx, x);
+            int64_t img_tokens = img->ne[1];
 
             if (ref_latents.size() > 0) {
                 for (ggml_tensor* ref : ref_latents) {
@@ -613,18 +613,18 @@ namespace Qwen {
                 ref_latents[i] = to_backend(ref_latents[i]);
             }
 
-            pe_vec = Rope::gen_qwen_image_pe(x->ne[1],
-                                             x->ne[0],
+            pe_vec = Rope::gen_qwen_image_pe(static_cast<int>(x->ne[1]),
+                                             static_cast<int>(x->ne[0]),
                                              qwen_image_params.patch_size,
-                                             x->ne[3],
-                                             context->ne[1],
+                                             static_cast<int>(x->ne[3]),
+                                             static_cast<int>(context->ne[1]),
                                              ref_latents,
                                              increase_ref_index,
                                              qwen_image_params.theta,
                                              circular_y_enabled,
                                              circular_x_enabled,
                                              qwen_image_params.axes_dim);
-            int pos_len = pe_vec.size() / qwen_image_params.axes_dim_sum / 2;
+            int pos_len = static_cast<int>(pe_vec.size() / qwen_image_params.axes_dim_sum / 2);
             // LOG_DEBUG("pos_len %d", pos_len);
             auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, qwen_image_params.axes_dim_sum / 2, pos_len);
             // pe->data = pe_vec.data();
@@ -715,12 +715,12 @@ namespace Qwen {
 
             struct ggml_tensor* out = nullptr;
 
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             compute(8, x, timesteps, context, {}, false, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("qwen_image test done in %dms", t1 - t0);
+            LOG_DEBUG("qwen_image test done in %lldms", t1 - t0);
         }
     }
diff --git a/rng_mt19937.hpp b/rng_mt19937.hpp
index 7e6199886..734554bfc 100644
--- a/rng_mt19937.hpp
+++ b/rng_mt19937.hpp
@@ -90,7 +90,7 @@ class MT19937RNG : public RNG {
                 float u1    = 1.0f - data[j];
                 float u2    = data[j + 8];
                 float r     = std::sqrt(-2.0f * std::log(u1));
-                float theta = 2.0f * 3.14159265358979323846 * u2;
+                float theta = 2.0f * 3.14159265358979323846f * u2;
                 data[j]     = r * std::cos(theta) * std + mean;
                 data[j + 8] = r * std::sin(theta) * std + mean;
             }
diff --git a/rope.hpp b/rope.hpp
index 4e6136c11..2d123b3cc 100644
--- a/rope.hpp
+++ b/rope.hpp
@@ -22,11 +22,11 @@ namespace Rope {
     }
 
     __STATIC_INLINE__ std::vector<std::vector<float>> transpose(const std::vector<std::vector<float>>& mat) {
-        int rows = mat.size();
-        int cols = mat[0].size();
+        size_t rows = mat.size();
+        size_t cols = mat[0].size();
         std::vector<std::vector<float>> transposed(cols, std::vector<float>(rows));
-        for (int i = 0; i < rows; ++i) {
-            for (int j = 0; j < cols; ++j) {
+        for (size_t i = 0; i < rows; ++i) {
+            for (size_t j = 0; j < cols; ++j) {
                 transposed[j][i] = mat[i][j];
             }
         }
@@ -52,13 +52,13 @@ namespace Rope {
         std::vector<float> omega(half_dim);
         for (int i = 0; i < half_dim; ++i) {
-            omega[i] = 1.0f / std::pow(theta, scale[i]);
+            omega[i] = 1.0f / ::powf(1.f * theta, scale[i]);
         }
 
-        int pos_size = pos.size();
+        size_t pos_size = pos.size();
         std::vector<std::vector<float>> out(pos_size, std::vector<float>(half_dim));
-        for (int i = 0; i < pos_size; ++i) {
-            for (int j = 0; j < half_dim; ++j) {
+        for (size_t i = 0; i < pos_size; ++i) {
+            for (size_t j = 0; j < half_dim; ++j) {
                 float angle = pos[i] * omega[j];
                 if (!axis_wrap_dims.empty()) {
                     size_t wrap_size = axis_wrap_dims.size();
@@ -99,7 +99,7 @@ namespace Rope {
         for (int dim = 0; dim < axes_dim_num; dim++) {
             if (arange_dims.find(dim) != arange_dims.end()) {
                 for (int i = 0; i < bs * context_len; i++) {
-                    txt_ids[i][dim] = (i % context_len);
+                    txt_ids[i][dim] = 1.f * (i % context_len);
                 }
             }
         }
@@ -128,12 +128,12 @@ namespace Rope {
             w_start -= w_len / 2;
         }
 
-        std::vector<float> row_ids = linspace(h_start, h_start + h_len - 1, h_len);
-        std::vector<float> col_ids = linspace(w_start, w_start + w_len - 1, w_len);
+        std::vector<float> row_ids = linspace(1.f * h_start, 1.f * h_start + h_len - 1, h_len);
+        std::vector<float> col_ids = linspace(1.f * w_start, 1.f * w_start + w_len - 1, w_len);
 
         for (int i = 0; i < h_len; ++i) {
             for (int j = 0; j < w_len; ++j) {
-                img_ids[i * w_len + j][0] = index;
+                img_ids[i * w_len + j][0] = 1.f * index;
                 img_ids[i * w_len + j][1] = row_ids[i];
                 img_ids[i * w_len + j][2] = col_ids[j];
             }
         }
@@ -172,7 +172,7 @@ namespace Rope {
                                                               const std::vector<std::vector<int>>& wrap_dims = {}) {
         std::vector<std::vector<float>> trans_ids = transpose(ids);
         size_t pos_len                            = ids.size() / bs;
-        int num_axes                              = axes_dim.size();
+        size_t num_axes                           = axes_dim.size();
         // for (int i = 0; i < pos_len; i++) {
         //     std::cout << trans_ids[0][i] << " " << trans_ids[1][i] << " " << trans_ids[2][i] << std::endl;
         // }
@@ -182,8 +182,8 @@ namespace Rope {
             emb_dim += d / 2;
 
         std::vector<std::vector<float>> emb(bs * pos_len, std::vector<float>(emb_dim * 2 * 2, 0.0));
-        int offset = 0;
-        for (int i = 0; i < num_axes; ++i) {
+        size_t offset = 0;
+        for (size_t i = 0; i < num_axes; ++i) {
             std::vector<int> axis_wrap_dims;
             if (!wrap_dims.empty() && i < (int)wrap_dims.size()) {
                 axis_wrap_dims = wrap_dims[i];
@@ -211,12 +211,12 @@ namespace Rope {
                                                                float ref_index_scale,
                                                                bool scale_rope) {
         std::vector<std::vector<float>> ids;
-        uint64_t curr_h_offset = 0;
-        uint64_t curr_w_offset = 0;
-        int index              = 1;
+        int curr_h_offset = 0;
+        int curr_w_offset = 0;
+        int index         = 1;
         for (ggml_tensor* ref : ref_latents) {
-            uint64_t h_offset = 0;
-            uint64_t w_offset = 0;
+            int h_offset = 0;
+            int w_offset = 0;
             if (!increase_ref_index) {
                 if (ref->ne[1] + curr_h_offset > ref->ne[0] + curr_w_offset) {
                     w_offset = curr_w_offset;
@@ -226,8 +226,8 @@ namespace Rope {
                 scale_rope = false;
             }
 
-            auto ref_ids = gen_flux_img_ids(ref->ne[1],
-                                            ref->ne[0],
+            auto ref_ids = gen_flux_img_ids(static_cast<int>(ref->ne[1]),
+                                            static_cast<int>(ref->ne[0]),
                                             patch_size,
                                             bs,
                                             axes_dim_num,
@@ -241,8 +241,8 @@ namespace Rope {
                 index++;
             }
 
-            curr_h_offset = std::max(curr_h_offset, ref->ne[1] + h_offset);
-            curr_w_offset = std::max(curr_w_offset, ref->ne[0] + w_offset);
+            curr_h_offset = std::max(curr_h_offset, static_cast<int>(ref->ne[1]) + h_offset);
+            curr_w_offset = std::max(curr_w_offset, static_cast<int>(ref->ne[0]) + w_offset);
         }
         return ids;
     }
@@ -345,7 +345,7 @@ namespace Rope {
         int h_len        = (h + (patch_size / 2)) / patch_size;
         int w_len        = (w + (patch_size / 2)) / patch_size;
         int txt_id_start = std::max(h_len, w_len);
-        auto txt_ids     = linspace(txt_id_start, context_len + txt_id_start, context_len);
+        auto txt_ids     = linspace(1.f * txt_id_start, 1.f * context_len + txt_id_start, context_len);
         std::vector<std::vector<float>> txt_ids_repeated(bs * context_len, std::vector<float>(3));
         for (int i = 0; i < bs; ++i) {
             for (int j = 0; j < txt_ids.size(); ++j) {
@@ -440,9 +440,9 @@ namespace Rope {
 
         std::vector<std::vector<float>> vid_ids(t_len * h_len * w_len, std::vector<float>(3, 0.0));
 
-        std::vector<float> t_ids = linspace(t_offset, t_len - 1 + t_offset, t_len);
-        std::vector<float> h_ids = linspace(h_offset, h_len - 1 + h_offset, h_len);
-        std::vector<float> w_ids = linspace(w_offset, w_len - 1 + w_offset, w_len);
+        std::vector<float> t_ids = linspace(1.f * t_offset, 1.f * t_len - 1 + t_offset, t_len);
+        std::vector<float> h_ids = linspace(1.f * h_offset, 1.f * h_len - 1 + h_offset, h_len);
+        std::vector<float> w_ids = linspace(1.f * w_offset, 1.f * w_len - 1 + w_offset, w_len);
 
         for (int i = 0; i < t_len; ++i) {
             for (int j = 0; j < h_len; ++j) {
@@ -493,8 +493,8 @@ namespace Rope {
 
                 GGML_ASSERT(i < grid_h * grid_w);
 
-                ids[i][0] = ih + iy;
-                ids[i][1] = iw + ix;
+                ids[i][0] = static_cast<float>(ih + iy);
+                ids[i][1] = static_cast<float>(iw + ix);
 
                 index++;
             }
         }
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index 1c8c55ba8..75689ff94 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -534,7 +534,7 @@ class StableDiffusionGGML {
                                           version);
             } else {  // SD1.x SD2.x SDXL
                 std::map<std::string, std::string> embbeding_map;
-                for (int i = 0; i < sd_ctx_params->embedding_count; i++) {
+                for (uint32_t i = 0; i < sd_ctx_params->embedding_count; i++) {
                     embbeding_map.emplace(SAFE_STR(sd_ctx_params->embeddings[i].name), SAFE_STR(sd_ctx_params->embeddings[i].path));
                 }
                 if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) {
@@ -1191,7 +1191,7 @@ class StableDiffusionGGML {
 
     void apply_loras(const sd_lora_t* loras, uint32_t lora_count) {
         std::unordered_map<std::string, float> lora_f2m;
-        for (int i = 0; i < lora_count; i++) {
+        for (uint32_t i = 0; i < lora_count; i++) {
             std::string lora_id = SAFE_STR(loras[i].path);
             if (loras[i].is_high_noise) {
                 lora_id = "|high_noise|" + lora_id;
@@ -1443,12 +1443,12 @@ class StableDiffusionGGML {
                       void* step_callback_data,
                       bool is_noisy) {
         const uint32_t channel = 3;
-        uint32_t width         = latents->ne[0];
-        uint32_t height        = latents->ne[1];
-        uint32_t dim           = latents->ne[ggml_n_dims(latents) - 1];
+        uint32_t width         = static_cast<uint32_t>(latents->ne[0]);
+        uint32_t height        = static_cast<uint32_t>(latents->ne[1]);
+        uint32_t dim           = static_cast<uint32_t>(latents->ne[ggml_n_dims(latents) - 1]);
 
         if (preview_mode == PREVIEW_PROJ) {
-            int64_t patch_sz = 1;
+            int patch_sz = 1;
 
             const float(*latent_rgb_proj)[channel] = nullptr;
             float* latent_rgb_bias                 = nullptr;
@@ -1508,7 +1508,7 @@ class StableDiffusionGGML {
 
             uint32_t frames = 1;
             if (ggml_n_dims(latents) == 4) {
-                frames = latents->ne[2];
+                frames = static_cast<uint32_t>(latents->ne[2]);
             }
 
             uint32_t img_width  = width * patch_sz;
@@ -1518,7 +1518,7 @@ class StableDiffusionGGML {
             preview_latent_video(data, latents, latent_rgb_proj, latent_rgb_bias, patch_sz);
 
             sd_image_t* images = (sd_image_t*)malloc(frames * sizeof(sd_image_t));
-            for (int i = 0; i < frames; i++) {
+            for (uint32_t i = 0; i < frames; i++) {
                 images[i] = {img_width, img_height, channel, data + i * img_width * img_height * channel};
             }
             step_callback(step, frames, images, is_noisy, step_callback_data);
@@ -1563,22 +1563,22 @@ class StableDiffusionGGML {
             ggml_ext_tensor_clamp_inplace(result, 0.0f, 1.0f);
             uint32_t frames = 1;
             if (ggml_n_dims(latents) == 4) {
-                frames = result->ne[2];
+                frames = static_cast<uint32_t>(result->ne[2]);
             }
 
             sd_image_t* images = (sd_image_t*)malloc(frames * sizeof(sd_image_t));
             // print_ggml_tensor(result,true);
 
             for (size_t i = 0; i < frames; i++) {
-                images[i].width   = result->ne[0];
-                images[i].height  = result->ne[1];
+                images[i].width   = static_cast<uint32_t>(result->ne[0]);
+                images[i].height  = static_cast<uint32_t>(result->ne[1]);
                 images[i].channel = 3;
-                images[i].data    = ggml_tensor_to_sd_image(result, i, ggml_n_dims(latents) == 4);
+                images[i].data    = ggml_tensor_to_sd_image(result, static_cast<int>(i), ggml_n_dims(latents) == 4);
             }
             step_callback(step, frames, images, is_noisy, step_callback_data);
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index 1c8c55ba8..75689ff94 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -534,7 +534,7 @@ class StableDiffusionGGML {
                             version);
             } else {  // SD1.x SD2.x SDXL
                 std::map<std::string, std::string> embbeding_map;
-                for (int i = 0; i < sd_ctx_params->embedding_count; i++) {
+                for (uint32_t i = 0; i < sd_ctx_params->embedding_count; i++) {
                     embbeding_map.emplace(SAFE_STR(sd_ctx_params->embeddings[i].name), SAFE_STR(sd_ctx_params->embeddings[i].path));
                 }
                 if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) {
@@ -1191,7 +1191,7 @@ class StableDiffusionGGML {
     void apply_loras(const sd_lora_t* loras, uint32_t lora_count) {
         std::unordered_map<std::string, float> lora_f2m;
-        for (int i = 0; i < lora_count; i++) {
+        for (uint32_t i = 0; i < lora_count; i++) {
             std::string lora_id = SAFE_STR(loras[i].path);
             if (loras[i].is_high_noise) {
                 lora_id = "|high_noise|" + lora_id;
@@ -1443,12 +1443,12 @@ class StableDiffusionGGML {
                                  void* step_callback_data,
                                  bool is_noisy) {
         const uint32_t channel = 3;
-        uint32_t width  = latents->ne[0];
-        uint32_t height = latents->ne[1];
-        uint32_t dim    = latents->ne[ggml_n_dims(latents) - 1];
+        uint32_t width  = static_cast<uint32_t>(latents->ne[0]);
+        uint32_t height = static_cast<uint32_t>(latents->ne[1]);
+        uint32_t dim    = static_cast<uint32_t>(latents->ne[ggml_n_dims(latents) - 1]);
         if (preview_mode == PREVIEW_PROJ) {
-            int64_t patch_sz = 1;
+            int patch_sz = 1;
             const float(*latent_rgb_proj)[channel] = nullptr;
             float* latent_rgb_bias                 = nullptr;
@@ -1508,7 +1508,7 @@ class StableDiffusionGGML {
             uint32_t frames = 1;
             if (ggml_n_dims(latents) == 4) {
-                frames = latents->ne[2];
+                frames = static_cast<uint32_t>(latents->ne[2]);
             }
             uint32_t img_width = width * patch_sz;
@@ -1518,7 +1518,7 @@ class StableDiffusionGGML {
             preview_latent_video(data, latents, latent_rgb_proj, latent_rgb_bias, patch_sz);
             sd_image_t* images = (sd_image_t*)malloc(frames * sizeof(sd_image_t));
-            for (int i = 0; i < frames; i++) {
+            for (uint32_t i = 0; i < frames; i++) {
                 images[i] = {img_width, img_height, channel, data + i * img_width * img_height * channel};
             }
             step_callback(step, frames, images, is_noisy, step_callback_data);
@@ -1563,22 +1563,22 @@ class StableDiffusionGGML {
             ggml_ext_tensor_clamp_inplace(result, 0.0f, 1.0f);
             uint32_t frames = 1;
             if (ggml_n_dims(latents) == 4) {
-                frames = result->ne[2];
+                frames = static_cast<uint32_t>(result->ne[2]);
             }
             sd_image_t* images = (sd_image_t*)malloc(frames * sizeof(sd_image_t));
             // print_ggml_tensor(result,true);
             for (size_t i = 0; i < frames; i++) {
-                images[i].width   = result->ne[0];
-                images[i].height  = result->ne[1];
+                images[i].width   = static_cast<uint32_t>(result->ne[0]);
+                images[i].height  = static_cast<uint32_t>(result->ne[1]);
                 images[i].channel = 3;
-                images[i].data    = ggml_tensor_to_sd_image(result, i, ggml_n_dims(latents) == 4);
+                images[i].data    = ggml_tensor_to_sd_image(result, static_cast<int>(i), ggml_n_dims(latents) == 4);
             }
             step_callback(step, frames, images, is_noisy, step_callback_data);
             ggml_ext_tensor_scale_inplace(result, 0);
-            for (int i = 0; i < frames; i++) {
+            for (uint32_t i = 0; i < frames; i++) {
                 free(images[i].data);
             }
@@ -1800,7 +1800,7 @@ class StableDiffusionGGML {
         int64_t H = x->ne[1] * get_vae_scale_factor();
         if (ggml_n_dims(x) == 4) {
             // assuming video mode (if batch processing gets implemented this will break)
-            int T = x->ne[2];
+            int64_t T = x->ne[2];
             if (sd_version_is_wan(version)) {
                 T = ((T - 1) * 4) + 1;
             }
@@ -2077,7 +2077,7 @@ class StableDiffusionGGML {
             img_cond_data = (float*)out_img_cond->data;
         }
-        int step_count         = sigmas.size();
+        int step_count         = static_cast<int>(sigmas.size());
         bool is_skiplayer_step = has_skiplayer && step > (int)(guidance.slg.layer_start * step_count) && step < (int)(guidance.slg.layer_end * step_count);
         float* skip_layer_data = has_skiplayer ? (float*)out_skip->data : nullptr;
         if (is_skiplayer_step) {
@@ -2449,11 +2449,11 @@ class StableDiffusionGGML {
                                int& tile_size_y,
                                float& tile_overlap,
                                const sd_tiling_params_t& params,
-                               int latent_x,
-                               int latent_y,
+                               int64_t latent_x,
+                               int64_t latent_y,
                                float encoding_factor = 1.0f) {
         tile_overlap       = std::max(std::min(params.target_overlap, 0.5f), 0.0f);
-        auto get_tile_size = [&](int requested_size, float factor, int latent_size) {
+        auto get_tile_size = [&](int requested_size, float factor, int64_t latent_size) {
             const int default_tile_size  = 32;
             const int min_tile_dimension = 4;
             int tile_size                = default_tile_size;
@@ -2462,12 +2462,12 @@ class StableDiffusionGGML {
             if (factor > 0.f) {
                 if (factor > 1.0)
                     factor = 1 / (factor - factor * tile_overlap + tile_overlap);
-                tile_size = std::round(latent_size * factor);
+                tile_size = static_cast<int>(std::round(latent_size * factor));
             } else if (requested_size >= min_tile_dimension) {
                 tile_size = requested_size;
             }
-            tile_size *= encoding_factor;
-            return std::max(std::min(tile_size, latent_size), min_tile_dimension);
+            tile_size = static_cast<int>(tile_size * encoding_factor);
+            return std::max(std::min(tile_size, static_cast<int>(latent_size)), min_tile_dimension);
         };
         tile_size_x = get_tile_size(params.tile_size_x, params.rel_size_x, latent_x);
@@ -2478,13 +2478,13 @@ class StableDiffusionGGML {
         int64_t t0                 = ggml_time_ms();
         ggml_tensor* result        = nullptr;
         const int vae_scale_factor = get_vae_scale_factor();
-        int W = x->ne[0] / vae_scale_factor;
-        int H = x->ne[1] / vae_scale_factor;
-        int C = get_latent_channel();
+        int64_t W = x->ne[0] / vae_scale_factor;
+        int64_t H = x->ne[1] / vae_scale_factor;
+        int64_t C = get_latent_channel();
         if (vae_tiling_params.enabled && !encode_video) {
             // TODO wan2.2 vae support?
-            int ne2;
-            int ne3;
+            int64_t ne2;
+            int64_t ne3;
             if (sd_version_is_qwen_image(version)) {
                 ne2 = 1;
                 ne3 = C * x->ne[3];
             } else {
@@ -2608,7 +2608,7 @@ class StableDiffusionGGML {
         int64_t C           = 3;
         ggml_tensor* result = nullptr;
         if (decode_video) {
-            int T = x->ne[2];
+            int64_t T = x->ne[2];
             if (sd_version_is_wan(version)) {
                 T = ((T - 1) * 4) + 1;
             }
@@ -3193,7 +3193,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
         guidance.img_cfg = guidance.txt_cfg;
     }
-    int sample_steps = sigmas.size() - 1;
+    int sample_steps = static_cast<int>(sigmas.size() - 1);
     int64_t t0 = ggml_time_ms();
@@ -3203,7 +3203,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
     condition_params.width           = width;
     condition_params.height          = height;
     condition_params.ref_images      = ref_images;
-    condition_params.adm_in_channels = sd_ctx->sd->diffusion_model->get_adm_in_channels();
+    condition_params.adm_in_channels = static_cast<int>(sd_ctx->sd->diffusion_model->get_adm_in_channels());
     // Photo Maker
     SDCondition id_cond = sd_ctx->sd->get_pmid_conditon(work_ctx, pm_params, condition_params);
@@ -3799,7 +3799,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
         // timesteps ∝ sigmas for Flow models (like wan2.2 a14b)
         for (size_t i = 0; i < sigmas.size(); ++i) {
             if (sigmas[i] < sd_vid_gen_params->moe_boundary) {
-                high_noise_sample_steps = i;
+                high_noise_sample_steps = static_cast<int>(i);
                 break;
             }
         }
@@ -3977,7 +3977,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
             int64_t length = inactive->ne[2];
             if (ref_image_latent) {
                 length += 1;
-                frames        = (length - 1) * 4 + 1;
+                frames        = static_cast<int>((length - 1) * 4 + 1);
                 ref_image_num = 1;
             }
             vace_context = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, inactive->ne[0], inactive->ne[1], length, 96);  // [b*96, t, h/vae_scale_factor, w/vae_scale_factor]
@@ -4043,7 +4043,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
     int W = width / vae_scale_factor;
     int H = height / vae_scale_factor;
-    int T = init_latent->ne[2];
+    int T = static_cast<int>(init_latent->ne[2]);
     int C = sd_ctx->sd->get_latent_channel();
     struct ggml_tensor* final_latent;
@@ -4162,13 +4162,13 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
         ggml_free(work_ctx);
         return nullptr;
     }
-    *num_frames_out = vid->ne[2];
+    *num_frames_out = static_cast<int>(vid->ne[2]);
-    for (size_t i = 0; i < vid->ne[2]; i++) {
-        result_images[i].width   = vid->ne[0];
-        result_images[i].height  = vid->ne[1];
+    for (int64_t i = 0; i < vid->ne[2]; i++) {
+        result_images[i].width   = static_cast<uint32_t>(vid->ne[0]);
+        result_images[i].height  = static_cast<uint32_t>(vid->ne[1]);
         result_images[i].channel = 3;
-        result_images[i].data    = ggml_tensor_to_sd_image(vid, i, true);
+        result_images[i].data    = ggml_tensor_to_sd_image(vid, static_cast<int>(i), true);
     }
     ggml_free(work_ctx);
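Most of the stable-diffusion.cpp churn above is one fix repeated: ggml dims (ne[]) are int64_t, and storing them into int or uint32_t destinations such as sd_image_t fields now goes through an explicit static_cast. If a value could plausibly overflow the target type, a checked helper is the natural next step; checked_narrow below is hypothetical, not something this patch adds:

    #include <cassert>
    #include <cstdint>
    #include <utility>  // std::in_range, C++20

    // Hypothetical checked version of the static_cast<int> / static_cast<uint32_t>
    // conversions used throughout this patch.
    template <typename To, typename From>
    static To checked_narrow(From v) {
        assert(std::in_range<To>(v));  // value-preserving for any signed/unsigned mix
        return static_cast<To>(v);
    }

    // e.g. uint32_t width = checked_narrow<uint32_t>(latents->ne[0]);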
diff --git a/t5.hpp b/t5.hpp
index 4370a5670..2e3e4b560 100644
--- a/t5.hpp
+++ b/t5.hpp
@@ -96,7 +96,7 @@ class T5UniGramTokenizer {
         try {
             data = nlohmann::json::parse(json_str);
-        } catch (const nlohmann::json::parse_error& e) {
+        } catch (const nlohmann::json::parse_error&) {
             status_ = INVLIAD_JSON;
             return;
         }
@@ -168,9 +168,9 @@ class T5UniGramTokenizer {
                                         kMaxTrieResultsSize);
         trie_results_size_ = 0;
         for (const auto& p : *pieces) {
-            const int num_nodes = trie_->commonPrefixSearch(
+            const size_t num_nodes = trie_->commonPrefixSearch(
                 p.first.data(), results.data(), results.size(), p.first.size());
-            trie_results_size_ = std::max(trie_results_size_, num_nodes);
+            trie_results_size_ = std::max(trie_results_size_, static_cast<int>(num_nodes));
         }
         if (trie_results_size_ == 0)
@@ -268,7 +268,7 @@ class T5UniGramTokenizer {
             -1;  // The starting position (in utf-8) of this node. The entire best
                  // path can be constructed by backtracking along this link.
         };
-        const int size = normalized.size();
+        const int size = static_cast<int>(normalized.size());
         const float unk_score = min_score() - kUnkPenalty;
         // The ends are exclusive.
         std::vector<BestPathNode> best_path_ends_at(size + 1);
@@ -281,7 +281,7 @@ class T5UniGramTokenizer {
             best_path_ends_at[starts_at].best_path_score;
         bool has_single_node = false;
         const int mblen =
-            std::min(OneCharLen(normalized.data() + starts_at),
+            std::min(static_cast<int>(OneCharLen(normalized.data() + starts_at)),
                      size - starts_at);
         while (key_pos < size) {
             const int ret =
@@ -302,7 +302,7 @@ class T5UniGramTokenizer {
                 score + best_path_score_till_here;
             if (target_node.starts_at == -1 ||
                 candidate_best_path_score > target_node.best_path_score) {
-                target_node.best_path_score = candidate_best_path_score;
+                target_node.best_path_score = static_cast<float>(candidate_best_path_score);
                 target_node.starts_at = starts_at;
                 target_node.id = ret;
             }
@@ -394,7 +394,7 @@ class T5UniGramTokenizer {
                     bool padding = false) {
         if (max_length > 0 && padding) {
             size_t orig_token_num = tokens.size() - 1;
-            size_t n = std::ceil(orig_token_num * 1.0 / (max_length - 1));
+            size_t n = static_cast<size_t>(std::ceil(orig_token_num * 1.0 / (max_length - 1)));
             if (n == 0) {
                 n = 1;
             }
@@ -608,7 +608,7 @@ class T5Attention : public GGMLBlock {
             }
         }
-        k = ggml_scale_inplace(ctx->ggml_ctx, k, sqrt(d_head));
+        k = ggml_scale_inplace(ctx->ggml_ctx, k, ::sqrtf(static_cast<float>(d_head)));
         x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, mask);  // [N, n_token, d_head * n_head]
@@ -797,7 +797,7 @@ struct T5Runner : public GGMLRunner {
         input_ids      = to_backend(input_ids);
         attention_mask = to_backend(attention_mask);
-        relative_position_bucket_vec = compute_relative_position_bucket(input_ids->ne[0], input_ids->ne[0]);
+        relative_position_bucket_vec = compute_relative_position_bucket(static_cast<int>(input_ids->ne[0]), static_cast<int>(input_ids->ne[0]));
         // for (int i = 0; i < relative_position_bucket_vec.size(); i++) {
         //     if (i % 77 == 0) {
@@ -984,12 +984,12 @@ struct T5Embedder {
             auto attention_mask = vector_to_ggml_tensor(work_ctx, masks);
             struct ggml_tensor* out = nullptr;
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             model.compute(8, input_ids, attention_mask, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("t5 test done in %dms", t1 - t0);
+            LOG_DEBUG("t5 test done in %lldms", t1 - t0);
         }
     }
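The timing change in the t5 test hook (repeated later in unet.hpp, vae.hpp, wan.hpp and z_image.hpp) fixes two problems at once: ggml_time_ms() returns int64_t, so the old int t0 truncated, and printing the difference with %d was a printf format mismatch, which is undefined behavior. A sketch of the corrected pattern, assuming ggml is linked; PRId64 is shown as the strictly portable alternative to the %lld the patch uses:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    extern "C" int64_t ggml_time_ms(void);  // as declared by ggml

    static void timed_block(void) {
        int64_t t0 = ggml_time_ms();
        // ... work ...
        int64_t t1 = ggml_time_ms();
        // %lld assumes long long is 64-bit (true on the usual LP64/LLP64
        // targets); PRId64 removes even that assumption.
        printf("done in %" PRId64 "ms\n", t1 - t0);
    }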
diff --git a/thirdparty/darts.h b/thirdparty/darts.h
index 7c25326a0..bd535cd5c 100644
--- a/thirdparty/darts.h
+++ b/thirdparty/darts.h
@@ -845,7 +845,7 @@ inline void BitVector::build() {
   num_ones_ = 0;
   for (std::size_t i = 0; i < units_.size(); ++i) {
-    ranks_[i] = num_ones_;
+    ranks_[i] = static_cast<id_type>(num_ones_);
     num_ones_ += pop_count(units_[i]);
   }
 }
@@ -1769,7 +1769,7 @@ id_type DoubleArrayBuilder::arrange_from_keyset(const Keyset &keyset,
 inline id_type DoubleArrayBuilder::find_valid_offset(id_type id) const {
   if (extras_head_ >= units_.size()) {
-    return units_.size() | (id & LOWER_MASK);
+    return static_cast<id_type>(units_.size()) | (id & LOWER_MASK);
   }
   id_type unfixed_id = extras_head_;
@@ -1781,7 +1781,7 @@ inline id_type DoubleArrayBuilder::find_valid_offset(id_type id) const {
     unfixed_id = extras(unfixed_id).next();
   } while (unfixed_id != extras_head_);
-  return units_.size() | (id & LOWER_MASK);
+  return static_cast<id_type>(units_.size()) | (id & LOWER_MASK);
 }
 inline bool DoubleArrayBuilder::is_valid_offset(id_type id,
@@ -1812,7 +1812,7 @@ inline void DoubleArrayBuilder::reserve_id(id_type id) {
   if (id == extras_head_) {
     extras_head_ = extras(id).next();
     if (extras_head_ == id) {
-      extras_head_ = units_.size();
+      extras_head_ = static_cast<id_type>(units_.size());
     }
   }
   extras(extras(id).prev()).set_next(extras(id).next());
@@ -1821,8 +1821,8 @@ inline void DoubleArrayBuilder::reserve_id(id_type id) {
 }
 inline void DoubleArrayBuilder::expand_units() {
-  id_type src_num_units = units_.size();
-  id_type src_num_blocks = num_blocks();
+  id_type src_num_units = static_cast<id_type>(units_.size());
+  id_type src_num_blocks = static_cast<id_type>(num_blocks());
   id_type dest_num_units = src_num_units + BLOCK_SIZE;
   id_type dest_num_blocks = src_num_blocks + 1;
@@ -1834,7 +1834,7 @@ inline void DoubleArrayBuilder::expand_units() {
   units_.resize(dest_num_units);
   if (dest_num_blocks > NUM_EXTRA_BLOCKS) {
-    for (std::size_t id = src_num_units; id < dest_num_units; ++id) {
+    for (id_type id = src_num_units; id < dest_num_units; ++id) {
       extras(id).set_is_used(false);
       extras(id).set_is_fixed(false);
     }
@@ -1858,9 +1858,9 @@ inline void DoubleArrayBuilder::expand_units() {
 inline void DoubleArrayBuilder::fix_all_blocks() {
   id_type begin = 0;
   if (num_blocks() > NUM_EXTRA_BLOCKS) {
-    begin = num_blocks() - NUM_EXTRA_BLOCKS;
+    begin = static_cast<id_type>(num_blocks() - NUM_EXTRA_BLOCKS);
   }
-  id_type end = num_blocks();
+  id_type end = static_cast<id_type>(num_blocks());
   for (id_type block_id = begin; block_id != end; ++block_id) {
     fix_block(block_id);
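For context on the BitVector::build() hunk: ranks_ caches, for each unit, the number of set bits in all earlier units, so a rank query is one array read plus one popcount, and the new cast only pins the running count back to darts' 32-bit id_type. A reduced sketch of the same structure, assuming that reading of darts-clone rather than quoting it:

    #include <bit>  // std::popcount, C++20
    #include <cstdint>
    #include <vector>

    struct RankedBits {
        std::vector<uint32_t> units;  // raw bit units
        std::vector<uint32_t> ranks;  // ones strictly before units[i]

        void build() {
            ranks.resize(units.size());
            uint32_t ones = 0;
            for (size_t i = 0; i < units.size(); ++i) {
                ranks[i] = ones;                  // the cast site in the hunk above
                ones += std::popcount(units[i]);
            }
        }
    };

    int main() {
        RankedBits rb;
        rb.units = {0xF0F0F0F0u, 0xFFFFFFFFu};
        rb.build();
        return rb.ranks[1] == 16 ? 0 : 1;  // 16 ones in the first unit
    }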
diff --git a/thirdparty/stb_image_write.h b/thirdparty/stb_image_write.h
index 55118853e..9128a3131 100644
--- a/thirdparty/stb_image_write.h
+++ b/thirdparty/stb_image_write.h
@@ -257,6 +257,10 @@ int stbi_write_tga_with_rle = 1;
 int stbi_write_force_png_filter = -1;
 #endif
+#ifndef STBMIN
+#define STBMIN(a, b) ((a) < (b) ? (a) : (b))
+#endif // STBMIN
+
 static int stbi__flip_vertically_on_write = 0;
 STBIWDEF void stbi_flip_vertically_on_write(int flag)
@@ -1179,8 +1183,8 @@ STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int s
    if (!zlib) return 0;
    if(parameters != NULL) {
-      param_length = strlen(parameters);
-      param_length += strlen("parameters") + 1; // For the name and the null-byte
+      param_length = (int)strlen(parameters);
+      param_length += (int)strlen("parameters") + 1; // For the name and the null-byte
    }
    // each tag requires 12 bytes of overhead
@@ -1526,11 +1530,11 @@ static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, in
    if(parameters != NULL) {
       stbiw__putc(s, 0xFF /* comnent */ );
       stbiw__putc(s, 0xFE /* marker */ );
-      size_t param_length = std::min(2 + strlen("parameters") + 1 + strlen(parameters) + 1, (size_t) 0xFFFF);
+      int param_length = STBMIN(2 + (int)strlen("parameters") + 1 + (int)strlen(parameters) + 1, 0xFFFF);
      stbiw__putc(s, param_length >> 8); // no need to mask, length < 65536
      stbiw__putc(s, param_length & 0xFF);
-      s->func(s->context, (void*)"parameters", strlen("parameters") + 1); // std::string is zero-terminated
-      s->func(s->context, (void*)parameters, std::min(param_length, (size_t) 65534) - 2 - strlen("parameters") - 1);
+      s->func(s->context, (void*)"parameters", (int)strlen("parameters") + 1); // std::string is zero-terminated
+      s->func(s->context, (void*)parameters, STBMIN(param_length, 65534) - 2 - (int)strlen("parameters") - 1);
      if(param_length > 65534) stbiw__putc(s, 0); // always zero-terminate for safety
      if(param_length & 1) stbiw__putc(s, 0xFF); // pad to even length
    }
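STBMIN replaces std::min presumably to keep stb_image_write.h friendly to C builds and to stop mixing size_t and int at these call sites. The usual macro caveat applies, both arguments are evaluated twice, which is harmless here because the call sites pass side-effect-free expressions:

    #ifndef STBMIN
    #define STBMIN(a, b) ((a) < (b) ? (a) : (b))
    #endif

    #include <stdio.h>

    int main(void) {
        int param_length = 70000;
        int capped = STBMIN(param_length, 65534);      /* fine: plain values */
        /* int bad = STBMIN(param_length++, 65534); */ /* ++ would run twice */
        printf("%d\n", capped);
        return 0;
    }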
diff --git a/unet.hpp b/unet.hpp
index ec7578e4b..eef204d07 100644
--- a/unet.hpp
+++ b/unet.hpp
@@ -12,7 +12,7 @@ class SpatialVideoTransformer : public SpatialTransformer {
 protected:
     int64_t time_depth;
-    int64_t max_time_embed_period;
+    int max_time_embed_period;
 public:
     SpatialVideoTransformer(int64_t in_channels,
@@ -21,8 +21,8 @@ class SpatialVideoTransformer : public SpatialTransformer {
                             int64_t depth,
                             int64_t context_dim,
                             bool use_linear,
-                            int64_t time_depth            = 1,
-                            int64_t max_time_embed_period = 10000)
+                            int64_t time_depth        = 1,
+                            int max_time_embed_period = 10000)
         : SpatialTransformer(in_channels, n_head, d_head, depth, context_dim, use_linear),
           max_time_embed_period(max_time_embed_period) {
         // We will convert unet transformer linear to conv2d 1x1 when loading the weights, so use_linear is always False
@@ -112,9 +112,9 @@ class SpatialVideoTransformer : public SpatialTransformer {
         x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 1, 2, 0, 3));  // [N, h, w, inner_dim]
         x = ggml_reshape_3d(ctx->ggml_ctx, x, inner_dim, w * h, n);                // [N, h * w, inner_dim]
-        auto num_frames = ggml_arange(ctx->ggml_ctx, 0, timesteps, 1);
+        auto num_frames = ggml_arange(ctx->ggml_ctx, 0.f, static_cast<float>(timesteps), 1.f);
         // since b is 1, no need to do repeat
-        auto t_emb = ggml_ext_timestep_embedding(ctx->ggml_ctx, num_frames, in_channels, max_time_embed_period);  // [N, in_channels]
+        auto t_emb = ggml_ext_timestep_embedding(ctx->ggml_ctx, num_frames, static_cast<int>(in_channels), max_time_embed_period);  // [N, in_channels]
         auto emb = time_pos_embed_0->forward(ctx, t_emb);
         emb      = ggml_silu_inplace(ctx->ggml_ctx, emb);
@@ -526,7 +526,7 @@ class UnetModelBlock : public GGMLBlock {
             auto cs = ggml_scale_inplace(ctx->ggml_ctx, controls[controls.size() - 1], control_strength);
             h       = ggml_add(ctx->ggml_ctx, h, cs);  // middle control
         }
-        int control_offset = controls.size() - 2;
+        int control_offset = static_cast<int>(controls.size() - 2);
         // output_blocks
         int output_block_idx = 0;
@@ -615,7 +615,7 @@ struct UNetModelRunner : public GGMLRunner {
         struct ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
         if (num_video_frames == -1) {
-            num_video_frames = x->ne[3];
+            num_video_frames = static_cast<int>(x->ne[3]);
         }
         x = to_backend(x);
@@ -700,12 +700,12 @@ struct UNetModelRunner : public GGMLRunner {
             struct ggml_tensor* out = nullptr;
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("unet test done in %dms", t1 - t0);
+            LOG_DEBUG("unet test done in %lldms", t1 - t0);
         }
     }
};
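On the ggml_arange change: ggml_arange takes float start/stop/step, so the previous integer arguments converted implicitly, and the patch spells the floats out. A minimal call sketch, assuming ggml.h is available; note the arange node's values are only produced once a graph is computed:

    #include "ggml.h"

    int main(void) {
        struct ggml_init_params ip = {
            /*.mem_size   =*/ 16 * 1024 * 1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context* ctx = ggml_init(ip);
        int timesteps = 8;
        // Explicit float arguments, as in the hunk above.
        struct ggml_tensor* frames = ggml_arange(ctx, 0.f, (float)timesteps, 1.f);
        (void)frames;
        ggml_free(ctx);
        return 0;
    }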
diff --git a/util.cpp b/util.cpp
index efb5e4949..a94cfd986 100644
--- a/util.cpp
+++ b/util.cpp
@@ -488,7 +488,7 @@ sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image) {
     // Allocate memory for float data
     converted_image.data = (float*)malloc(image.width * image.height * image.channel * sizeof(float));
-    for (int i = 0; i < image.width * image.height * image.channel; i++) {
+    for (uint32_t i = 0; i < image.width * image.height * image.channel; i++) {
         // Convert uint8_t to float
         converted_image.data[i] = (float)image.data[i];
     }
@@ -520,7 +520,7 @@ sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int
             uint32_t x2 = std::min(x1 + 1, image.width - 1);
             uint32_t y2 = std::min(y1 + 1, image.height - 1);
-            for (int k = 0; k < image.channel; k++) {
+            for (uint32_t k = 0; k < image.channel; k++) {
                 float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
                 float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
                 float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
@@ -540,9 +540,9 @@ sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int
 }
 void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]) {
-    for (int y = 0; y < image.height; y++) {
-        for (int x = 0; x < image.width; x++) {
-            for (int k = 0; k < image.channel; k++) {
+    for (uint32_t y = 0; y < image.height; y++) {
+        for (uint32_t x = 0; x < image.width; x++) {
+            for (uint32_t k = 0; k < image.channel; k++) {
                 int index         = (y * image.width + x) * image.channel + k;
                 image.data[index] = (image.data[index] - means[k]) / stds[k];
             }
@@ -551,8 +551,8 @@ void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3
 }
 // Constants for means and std
-float means[3] = {0.48145466, 0.4578275, 0.40821073};
-float stds[3] = {0.26862954, 0.26130258, 0.27577711};
+float means[3] = {0.48145466f, 0.4578275f, 0.40821073f};
+float stds[3]  = {0.26862954f, 0.26130258f, 0.27577711f};
 // Function to clip and preprocess sd_image_f32_t
 sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height) {
@@ -576,7 +576,7 @@ sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int targe
             uint32_t x2 = std::min(x1 + 1, image.width - 1);
             uint32_t y2 = std::min(y1 + 1, image.height - 1);
-            for (int k = 0; k < image.channel; k++) {
+            for (uint32_t k = 0; k < image.channel; k++) {
                 float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
                 float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
                 float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
@@ -602,11 +602,11 @@ sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int targe
     result.channel = image.channel;
     result.data    = (float*)malloc(target_height * target_width * image.channel * sizeof(float));
-    for (int k = 0; k < image.channel; k++) {
-        for (int i = 0; i < result.height; i++) {
-            for (int j = 0; j < result.width; j++) {
-                int src_y = std::min(i + h_offset, resized_height - 1);
-                int src_x = std::min(j + w_offset, resized_width - 1);
+    for (uint32_t k = 0; k < image.channel; k++) {
+        for (uint32_t i = 0; i < result.height; i++) {
+            for (uint32_t j = 0; j < result.width; j++) {
+                int src_y = std::min(static_cast<int>(i + h_offset), resized_height - 1);
+                int src_x = std::min(static_cast<int>(j + w_offset), resized_width - 1);
                 *(result.data + i * result.width * image.channel + j * image.channel + k) =
                     fmin(fmax(*(resized_data + src_y * resized_width * image.channel + src_x * image.channel + k), 0.0f), 255.0f) / 255.0f;
             }
@@ -617,9 +617,9 @@ sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int targe
     free(resized_data);
     // Normalize
-    for (int k = 0; k < image.channel; k++) {
-        for (int i = 0; i < result.height; i++) {
-            for (int j = 0; j < result.width; j++) {
+    for (uint32_t k = 0; k < image.channel; k++) {
+        for (uint32_t i = 0; i < result.height; i++) {
+            for (uint32_t j = 0; j < result.width; j++) {
                 // *(result.data + i * size * image.channel + j * image.channel + k) = 0.5f;
                 int offset  = i * result.width * image.channel + j * image.channel + k;
                 float value = *(result.data + offset);
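The util.cpp loops switch to uint32_t because the sd_image_f32_t dimensions they compare against are unsigned; the arithmetic is unchanged. For reference, the normalization clip_preprocess ends with is plain (x - mean) / std over interleaved HWC data, as in this small equivalent (normalize_hwc is an illustrative helper, not repo code; the constants are the CLIP statistics from util.cpp above):

    #include <cstdint>

    static void normalize_hwc(float* data, uint32_t h, uint32_t w, uint32_t c,
                              const float means[3], const float stds[3]) {
        for (uint32_t y = 0; y < h; y++)
            for (uint32_t x = 0; x < w; x++)
                for (uint32_t k = 0; k < c; k++) {
                    uint32_t i = (y * w + x) * c + k;  // interleaved HWC index
                    data[i] = (data[i] - means[k]) / stds[k];
                }
    }

    int main() {
        float img[2 * 2 * 3] = {0};
        const float means[3] = {0.48145466f, 0.4578275f, 0.40821073f};
        const float stds[3]  = {0.26862954f, 0.26130258f, 0.27577711f};
        normalize_hwc(img, 2, 2, 3, means, stds);
        return 0;
    }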
diff --git a/vae.hpp b/vae.hpp
index ad5db1b57..cd055aa86 100644
--- a/vae.hpp
+++ b/vae.hpp
@@ -166,18 +166,18 @@ class AE3DConv : public Conv2d {
     AE3DConv(int64_t in_channels,
              int64_t out_channels,
              std::pair<int, int> kernel_size,
-             int64_t video_kernel_size = 3,
+             int video_kernel_size = 3,
              std::pair<int, int> stride   = {1, 1},
              std::pair<int, int> padding  = {0, 0},
              std::pair<int, int> dilation = {1, 1},
              bool bias                    = true)
         : Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias) {
-        int64_t kernel_padding  = video_kernel_size / 2;
-        blocks["time_mix_conv"] = std::shared_ptr<GGMLBlock>(new Conv3dnx1x1(out_channels,
-                                                                             out_channels,
-                                                                             video_kernel_size,
-                                                                             1,
-                                                                             kernel_padding));
+        int kernel_padding      = video_kernel_size / 2;
+        blocks["time_mix_conv"] = std::shared_ptr<GGMLBlock>(new Conv3d(out_channels,
+                                                                        out_channels,
+                                                                        {video_kernel_size, 1, 1},
+                                                                        {1, 1, 1},
+                                                                        {kernel_padding, 0, 0}));
     }
     struct ggml_tensor* forward(GGMLRunnerContext* ctx,
@@ -186,7 +186,7 @@ class AE3DConv : public Conv2d {
         // skip_video always False
         // x: [N, IC, IH, IW]
         // result: [N, OC, OH, OW]
-        auto time_mix_conv = std::dynamic_pointer_cast<Conv3dnx1x1>(blocks["time_mix_conv"]);
+        auto time_mix_conv = std::dynamic_pointer_cast<Conv3d>(blocks["time_mix_conv"]);
         x = Conv2d::forward(ctx, x);
         // timesteps = x.shape[0]
@@ -409,8 +409,8 @@ class Decoder : public GGMLBlock {
           z_channels(z_channels),
           video_decoder(video_decoder),
           video_kernel_size(video_kernel_size) {
-        size_t num_resolutions = ch_mult.size();
-        int block_in = ch * ch_mult[num_resolutions - 1];
+        int num_resolutions = static_cast<int>(ch_mult.size());
+        int block_in        = ch * ch_mult[num_resolutions - 1];
         blocks["conv_in"] = std::shared_ptr<GGMLBlock>(new Conv2d(z_channels, block_in, {3, 3}, {1, 1}, {1, 1}));
@@ -461,7 +461,7 @@ class Decoder : public GGMLBlock {
         h = mid_block_2->forward(ctx, h);  // [N, block_in, h, w]
         // upsampling
-        size_t num_resolutions = ch_mult.size();
+        int num_resolutions = static_cast<int>(ch_mult.size());
         for (int i = num_resolutions - 1; i >= 0; i--) {
             for (int j = 0; j < num_res_blocks + 1; j++) {
                 std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j);
@@ -745,12 +745,12 @@ struct AutoEncoderKL : public VAE {
             print_ggml_tensor(x);
             struct ggml_tensor* out = nullptr;
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             compute(8, x, false, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("encode test done in %dms", t1 - t0);
+            LOG_DEBUG("encode test done in %lldms", t1 - t0);
         }
         if (false) {
@@ -763,12 +763,12 @@ struct AutoEncoderKL : public VAE {
             print_ggml_tensor(z);
             struct ggml_tensor* out = nullptr;
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             compute(8, z, true, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("decode test done in %dms", t1 - t0);
+            LOG_DEBUG("decode test done in %lldms", t1 - t0);
         }
    };
};
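The AE3DConv hunk folds the special-cased Conv3dnx1x1 into the general Conv3d with a {k, 1, 1} kernel, a convolution that mixes only along time. kernel_padding = video_kernel_size / 2 is what keeps the frame count fixed: for a stride-1 convolution, L_out = L + 2*pad - k + 1, and with odd k, integer k/2 gives L_out == L. A quick check of that arithmetic:

    #include <cstdio>

    int main() {
        const int L = 16;  // frames in
        for (int k = 1; k <= 7; k += 2) {
            int pad   = k / 2;
            int L_out = L + 2 * pad - k + 1;
            printf("k=%d pad=%d L_out=%d\n", k, pad, L_out);  // always 16
        }
        return 0;
    }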
diff --git a/wan.hpp b/wan.hpp
index 31ecf33f7..936fb6f34 100644
--- a/wan.hpp
+++ b/wan.hpp
@@ -108,7 +108,7 @@ namespace WAN {
         struct ggml_tensor* w = params["gamma"];
         w      = ggml_reshape_1d(ctx->ggml_ctx, w, ggml_nelements(w));
         auto h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 3, 0, 1, 2));  // [ID, IH, IW, N*IC]
-        h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12);
+        h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12f);
         h = ggml_mul(ctx->ggml_ctx, h, w);
         h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, h, 1, 2, 3, 0));
@@ -243,13 +243,13 @@ namespace WAN {
     protected:
         int64_t in_channels;
         int64_t out_channels;
-        int64_t factor_t;
-        int64_t factor_s;
-        int64_t factor;
+        int factor_t;
+        int factor_s;
+        int factor;
         int64_t group_size;
     public:
-        AvgDown3D(int64_t in_channels, int64_t out_channels, int64_t factor_t, int64_t factor_s = 1)
+        AvgDown3D(int64_t in_channels, int64_t out_channels, int factor_t, int factor_s = 1)
             : in_channels(in_channels), out_channels(out_channels), factor_t(factor_t), factor_s(factor_s) {
             factor = factor_t * factor_s * factor_s;
             GGML_ASSERT(in_channels * factor % out_channels == 0);
@@ -266,7 +266,7 @@ namespace WAN {
             int64_t H = x->ne[1];
             int64_t W = x->ne[0];
-            int64_t pad_t = (factor_t - T % factor_t) % factor_t;
+            int pad_t = (factor_t - T % factor_t) % factor_t;
             x = ggml_pad_ext(ctx->ggml_ctx, x, 0, 0, 0, 0, pad_t, 0, 0, 0);
             T = x->ne[2];
@@ -1071,7 +1071,7 @@ namespace WAN {
             int64_t iter_ = z->ne[2];
             auto x        = conv2->forward(ctx, z);
             struct ggml_tensor* out;
-            for (int64_t i = 0; i < iter_; i++) {
+            for (int i = 0; i < iter_; i++) {
                 _conv_idx = 0;
                 if (i == 0) {
                     auto in = ggml_ext_slice(ctx->ggml_ctx, x, 2, i, i + 1);  // [b*c, 1, h, w]
@@ -1091,7 +1091,7 @@ namespace WAN {
         struct ggml_tensor* decode_partial(GGMLRunnerContext* ctx,
                                            struct ggml_tensor* z,
-                                           int64_t i,
+                                           int i,
                                            int64_t b = 1) {
             // z: [b*c, t, h, w]
             GGML_ASSERT(b == 1);
@@ -1146,12 +1146,12 @@ namespace WAN {
             return gf;
         }
-        struct ggml_cgraph* build_graph_partial(struct ggml_tensor* z, bool decode_graph, int64_t i) {
+        struct ggml_cgraph* build_graph_partial(struct ggml_tensor* z, bool decode_graph, int i) {
             struct ggml_cgraph* gf = new_graph_custom(20480);
             ae.clear_cache();
-            for (int64_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
+            for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
                 auto feat_cache        = get_cache_tensor_by_name("feat_idx:" + std::to_string(feat_idx));
                 ae._feat_map[feat_idx] = feat_cache;
             }
@@ -1162,7 +1162,7 @@ namespace WAN {
             struct ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z);
-            for (int64_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
+            for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
                 ggml_tensor* feat_cache = ae._feat_map[feat_idx];
                 if (feat_cache != nullptr) {
                     cache("feat_idx:" + std::to_string(feat_idx), feat_cache);
@@ -1188,7 +1188,7 @@ namespace WAN {
             } else {  // chunk 1 result is weird
                 ae.clear_cache();
                 int64_t t = z->ne[2];
-                int64_t i = 0;
+                int i     = 0;
                 auto get_graph = [&]() -> struct ggml_cgraph* {
                     return build_graph_partial(z, decode_graph, i);
                 };
@@ -1499,7 +1499,7 @@ namespace WAN {
     class WanAttentionBlock : public GGMLBlock {
     protected:
-        int dim;
+        int64_t dim;
         void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
             enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
@@ -1639,7 +1639,7 @@ namespace WAN {
     class Head : public GGMLBlock {
     protected:
-        int dim;
+        int64_t dim;
         void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
             enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
@@ -1685,8 +1685,8 @@ namespace WAN {
     class MLPProj : public GGMLBlock {
     protected:
-        int in_dim;
-        int flf_pos_embed_token_number;
+        int64_t in_dim;
+        int64_t flf_pos_embed_token_number;
         void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
             if (flf_pos_embed_token_number > 0) {
@@ -1739,17 +1739,17 @@ namespace WAN {
         int64_t in_dim   = 16;
         int64_t dim      = 2048;
         int64_t ffn_dim  = 8192;
-        int64_t freq_dim = 256;
+        int freq_dim     = 256;
         int64_t text_dim = 4096;
         int64_t out_dim  = 16;
         int64_t num_heads = 16;
-        int64_t num_layers  = 32;
-        int64_t vace_layers = 0;
+        int num_layers  = 32;
+        int vace_layers = 0;
         int64_t vace_in_dim = 96;
         std::map<int, int> vace_layers_mapping = {};
         bool qk_norm         = true;
         bool cross_attn_norm = true;
-        float eps = 1e-6;
+        float eps = 1e-6f;
         int64_t flf_pos_embed_token_number = 0;
         int theta = 10000;
         // wan2.1 1.3B: 1536/12, wan2.1/2.2 14B: 5120/40, wan2.2 5B: 3074/24
@@ -2066,7 +2066,7 @@ namespace WAN {
             if (version == VERSION_WAN2_2_TI2V) {
                 desc = "Wan2.2-TI2V-5B";
                 wan_params.dim = 3072;
-                wan_params.eps = 1e-06;
+                wan_params.eps = 1e-06f;
                 wan_params.ffn_dim = 14336;
                 wan_params.freq_dim = 256;
                 wan_params.in_dim = 48;
@@ -2085,7 +2085,7 @@ namespace WAN {
                     wan_params.in_dim = 16;
                 }
                 wan_params.dim = 1536;
-                wan_params.eps = 1e-06;
+                wan_params.eps = 1e-06f;
                 wan_params.ffn_dim = 8960;
                 wan_params.freq_dim = 256;
                 wan_params.num_heads = 12;
@@ -2114,14 +2114,14 @@ namespace WAN {
                     }
                 }
                 wan_params.dim = 5120;
-                wan_params.eps = 1e-06;
+                wan_params.eps = 1e-06f;
                 wan_params.ffn_dim = 13824;
                 wan_params.freq_dim = 256;
                 wan_params.num_heads = 40;
                 wan_params.out_dim = 16;
                 wan_params.text_len = 512;
             } else {
-                GGML_ABORT("invalid num_layers(%ld) of wan", wan_params.num_layers);
+                GGML_ABORT("invalid num_layers(%d) of wan", wan_params.num_layers);
             }
             LOG_INFO("%s", desc.c_str());
@@ -2156,16 +2156,16 @@ namespace WAN {
             time_dim_concat = to_backend(time_dim_concat);
             vace_context    = to_backend(vace_context);
-            pe_vec = Rope::gen_wan_pe(x->ne[2],
-                                      x->ne[1],
-                                      x->ne[0],
+            pe_vec = Rope::gen_wan_pe(static_cast<int>(x->ne[2]),
+                                      static_cast<int>(x->ne[1]),
+                                      static_cast<int>(x->ne[0]),
                                       std::get<0>(wan_params.patch_size),
                                       std::get<1>(wan_params.patch_size),
                                       std::get<2>(wan_params.patch_size),
                                       1,
                                       wan_params.theta,
                                       wan_params.axes_dim);
-            int pos_len = pe_vec.size() / wan_params.axes_dim_sum / 2;
+            int pos_len = static_cast<int>(pe_vec.size() / wan_params.axes_dim_sum / 2);
             // LOG_DEBUG("pos_len %d", pos_len);
             auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, wan_params.axes_dim_sum / 2, pos_len);
             // pe->data = pe_vec.data();
@@ -2243,12 +2243,12 @@ namespace WAN {
             struct ggml_tensor* out = nullptr;
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("wan test done in %dms", t1 - t0);
+            LOG_DEBUG("wan test done in %lldms", t1 - t0);
         }
     }
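The WAN VAE above decodes video one time slice per graph, and build_graph_partial threads per-layer feature caches between those graphs through tensors named "feat_idx:N". Reduced to its control flow with stand-in types, the cache round trip looks like this; only the shape mirrors the real code:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Stand-ins: ints play the role of cached ggml tensors.
    static std::map<std::string, int> cache_store;  // named cache, per runner
    static std::vector<int> feat_map(32, -1);       // stand-in for ae._feat_map

    static void load_caches() {  // before each per-frame graph
        for (size_t i = 0; i < feat_map.size(); i++) {
            auto it     = cache_store.find("feat_idx:" + std::to_string(i));
            feat_map[i] = (it != cache_store.end()) ? it->second : -1;
        }
    }

    static void store_caches() {  // after each per-frame graph
        for (size_t i = 0; i < feat_map.size(); i++)
            if (feat_map[i] != -1)
                cache_store["feat_idx:" + std::to_string(i)] = feat_map[i];
    }

    int main() {
        for (int i = 0; i < 4; i++) {  // one "graph" per time slice
            load_caches();             // chunk 0 starts from empty caches
            feat_map[0] = i;           // pretend the decode produced a feature
            store_caches();
        }
        printf("final cached feature: %d\n", cache_store["feat_idx:0"]);
        return 0;
    }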
diff --git a/z_image.hpp b/z_image.hpp
index af8d57e04..0abc78320 100644
--- a/z_image.hpp
+++ b/z_image.hpp
@@ -239,7 +239,7 @@ namespace ZImage {
     };
     struct ZImageParams {
-        int64_t patch_size = 2;
+        int patch_size = 2;
         int64_t hidden_size  = 3840;
         int64_t in_channels  = 16;
         int64_t out_channels = 16;
@@ -249,11 +249,11 @@ namespace ZImage {
         int64_t num_heads    = 30;
         int64_t num_kv_heads = 30;
         int64_t multiple_of  = 256;
-        float ffn_dim_multiplier = 8.0 / 3.0f;
+        float ffn_dim_multiplier = 8.0f / 3.0f;
         float norm_eps = 1e-5f;
         bool qk_norm   = true;
         int64_t cap_feat_dim = 2560;
-        float theta = 256.f;
+        int theta = 256;
         std::vector<int> axes_dim = {32, 48, 48};
         int64_t axes_dim_sum      = 128;
     };
@@ -411,13 +411,13 @@ namespace ZImage {
         auto txt = cap_embedder_1->forward(ctx, cap_embedder_0->forward(ctx, context));  // [N, n_txt_token, hidden_size]
         auto img = x_embedder->forward(ctx, x);                                          // [N, n_img_token, hidden_size]
-        int64_t n_txt_pad_token = Rope::bound_mod(n_txt_token, SEQ_MULTI_OF);
+        int64_t n_txt_pad_token = Rope::bound_mod(static_cast<int>(n_txt_token), SEQ_MULTI_OF);
         if (n_txt_pad_token > 0) {
             auto txt_pad_tokens = ggml_repeat_4d(ctx->ggml_ctx, txt_pad_token, txt_pad_token->ne[0], n_txt_pad_token, N, 1);
             txt                 = ggml_concat(ctx->ggml_ctx, txt, txt_pad_tokens, 1);  // [N, n_txt_token + n_txt_pad_token, hidden_size]
         }
-        int64_t n_img_pad_token = Rope::bound_mod(n_img_token, SEQ_MULTI_OF);
+        int64_t n_img_pad_token = Rope::bound_mod(static_cast<int>(n_img_token), SEQ_MULTI_OF);
         if (n_img_pad_token > 0) {
             auto img_pad_tokens = ggml_repeat_4d(ctx->ggml_ctx, img_pad_token, img_pad_token->ne[0], n_img_pad_token, N, 1);
             img                 = ggml_concat(ctx->ggml_ctx, img, img_pad_tokens, 1);  // [N, n_img_token + n_img_pad_token, hidden_size]
@@ -543,11 +543,11 @@ namespace ZImage {
                 ref_latents[i] = to_backend(ref_latents[i]);
             }
-            pe_vec = Rope::gen_z_image_pe(x->ne[1],
-                                          x->ne[0],
+            pe_vec = Rope::gen_z_image_pe(static_cast<int>(x->ne[1]),
+                                          static_cast<int>(x->ne[0]),
                                           z_image_params.patch_size,
-                                          x->ne[3],
-                                          context->ne[1],
+                                          static_cast<int>(x->ne[3]),
+                                          static_cast<int>(context->ne[1]),
                                           SEQ_MULTI_OF,
                                           ref_latents,
                                           increase_ref_index,
@@ -555,7 +555,7 @@ namespace ZImage {
                                           circular_y_enabled,
                                           circular_x_enabled,
                                           z_image_params.axes_dim);
-            int pos_len = pe_vec.size() / z_image_params.axes_dim_sum / 2;
+            int pos_len = static_cast<int>(pe_vec.size() / z_image_params.axes_dim_sum / 2);
             // LOG_DEBUG("pos_len %d", pos_len);
             auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, z_image_params.axes_dim_sum / 2, pos_len);
             // pe->data = pe_vec.data();
@@ -619,12 +619,12 @@ namespace ZImage {
             struct ggml_tensor* out = nullptr;
-            int t0 = ggml_time_ms();
+            int64_t t0 = ggml_time_ms();
             compute(8, x, timesteps, context, {}, false, &out, work_ctx);
-            int t1 = ggml_time_ms();
+            int64_t t1 = ggml_time_ms();
             print_ggml_tensor(out);
-            LOG_DEBUG("z_image test done in %dms", t1 - t0);
+            LOG_DEBUG("z_image test done in %lldms", t1 - t0);
         }
     }
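In z_image.hpp, Rope::bound_mod is used to pad the text and image token counts up to a multiple of SEQ_MULTI_OF before pad tokens are concatenated. bound_mod's definition is not part of this patch; the call sites only need the pad count, which is the quantity computed below (the SEQ_MULTI_OF value is assumed for illustration):

    #include <cstdio>

    static int pad_to_multiple(int n, int m) {
        return (m - n % m) % m;  // 0 when n is already a multiple of m
    }

    int main() {
        const int SEQ_MULTI_OF = 32;  // assumed value, for illustration only
        printf("%d\n", pad_to_multiple(100, SEQ_MULTI_OF));  // 28
        printf("%d\n", pad_to_multiple(128, SEQ_MULTI_OF));  // 0
        return 0;
    }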