Skip to content

Commit a4a3541

Browse files
chore(core): Consistently use std::size_t
Quite a few functions took `int` parameters just to cast them to `std::size_t` everywhere.
1 parent 928b269 commit a4a3541

File tree

10 files changed

+311
-398
lines changed

10 files changed

+311
-398
lines changed

core/include/gprat/cpu/gp_functions.hpp

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,9 @@ namespace cpu
3030
std::vector<mutable_tile_data<double>>
3131
cholesky(const std::vector<double> &training_input,
3232
const SEKParams &sek_params,
33-
int n_tiles,
34-
int n_tile_size,
35-
int n_regressors);
33+
std::size_t n_tiles,
34+
std::size_t n_tile_size,
35+
std::size_t n_regressors);
3636

3737
/**
3838
* @brief Compute the predictions without uncertainties.
@@ -54,11 +54,11 @@ predict(const std::vector<double> &training_input,
5454
const std::vector<double> &training_output,
5555
const std::vector<double> &test_input,
5656
const SEKParams &sek_params,
57-
int n_tiles,
58-
int n_tile_size,
59-
int m_tiles,
60-
int m_tile_size,
61-
int n_regressors);
57+
std::size_t n_tiles,
58+
std::size_t n_tile_size,
59+
std::size_t m_tiles,
60+
std::size_t m_tile_size,
61+
std::size_t n_regressors);
6262

6363
/**
6464
* @brief Compute the predictions with uncertainties.
@@ -80,11 +80,11 @@ std::vector<std::vector<double>> predict_with_uncertainty(
8080
const std::vector<double> &training_output,
8181
const std::vector<double> &test_input,
8282
const SEKParams &sek_params,
83-
int n_tiles,
84-
int n_tile_size,
85-
int m_tiles,
86-
int m_tile_size,
87-
int n_regressors);
83+
std::size_t n_tiles,
84+
std::size_t n_tile_size,
85+
std::size_t m_tiles,
86+
std::size_t m_tile_size,
87+
std::size_t n_regressors);
8888

8989
/**
9090
* @brief Compute the predictions with full covariance matrix.
@@ -106,11 +106,11 @@ std::vector<std::vector<double>> predict_with_full_cov(
106106
const std::vector<double> &training_output,
107107
const std::vector<double> &test_data,
108108
const SEKParams &sek_params,
109-
int n_tiles,
110-
int n_tile_size,
111-
int m_tiles,
112-
int m_tile_size,
113-
int n_regressors);
109+
std::size_t n_tiles,
110+
std::size_t n_tile_size,
111+
std::size_t m_tiles,
112+
std::size_t m_tile_size,
113+
std::size_t n_regressors);
114114

115115
/**
116116
* @brief Compute loss for given data and Gaussian process model
@@ -127,9 +127,9 @@ std::vector<std::vector<double>> predict_with_full_cov(
127127
double compute_loss(const std::vector<double> &training_input,
128128
const std::vector<double> &training_output,
129129
const SEKParams &sek_params,
130-
int n_tiles,
131-
int n_tile_size,
132-
int n_regressors);
130+
std::size_t n_tiles,
131+
std::size_t n_tile_size,
132+
std::size_t n_regressors);
133133

134134
/**
135135
* @brief Perform optimization for a given number of iterations

core/include/gprat/cpu/tiled_algorithms.hpp

Lines changed: 35 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ namespace cpu
2828
* @param N Tile size per dimension.
2929
* @param n_tiles Number of tiles per dimension.
3030
*/
31-
void right_looking_cholesky_tiled(Tiled_matrix &ft_tiles, int N, std::size_t n_tiles);
31+
void right_looking_cholesky_tiled(Tiled_matrix &ft_tiles, std::size_t N, std::size_t n_tiles);
3232

3333
// Tiled Triangular Solve Algorithms
3434

@@ -40,7 +40,7 @@ void right_looking_cholesky_tiled(Tiled_matrix &ft_tiles, int N, std::size_t n_t
4040
* @param N Tile size per dimension.
4141
* @param n_tiles Number of tiles per dimension.
4242
*/
43-
void forward_solve_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_rhs, int N, std::size_t n_tiles);
43+
void forward_solve_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_rhs, std::size_t N, std::size_t n_tiles);
4444

4545
/**
4646
* @brief Perform tiled backward triangular matrix-vector solve.
@@ -50,7 +50,7 @@ void forward_solve_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_rhs, int N, st
5050
* @param N Tile size per dimension.
5151
* @param n_tiles Number of tiles per dimension.
5252
*/
53-
void backward_solve_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_rhs, int N, std::size_t n_tiles);
53+
void backward_solve_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_rhs, std::size_t N, std::size_t n_tiles);
5454

5555
/**
5656
* @brief Perform tiled forward triangular matrix-matrix solve.
@@ -62,8 +62,12 @@ void backward_solve_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_rhs, int N, s
6262
* @param n_tiles Number of tiles in first dimension.
6363
* @param m_tiles Number of tiles in second dimension.
6464
*/
65-
void forward_solve_tiled_matrix(
66-
Tiled_matrix &ft_tiles, Tiled_matrix &ft_rhs, int N, int M, std::size_t n_tiles, std::size_t m_tiles);
65+
void forward_solve_tiled_matrix(Tiled_matrix &ft_tiles,
66+
Tiled_matrix &ft_rhs,
67+
std::size_t N,
68+
std::size_t M,
69+
std::size_t n_tiles,
70+
std::size_t m_tiles);
6771

6872
/**
6973
* @brief Perform tiled backward triangular matrix-matrix solve.
@@ -75,15 +79,19 @@ void forward_solve_tiled_matrix(
7579
* @param n_tiles Number of tiles in first dimension.
7680
* @param m_tiles Number of tiles in second dimension.
7781
*/
78-
void backward_solve_tiled_matrix(
79-
Tiled_matrix &ft_tiles, Tiled_matrix &ft_rhs, int N, int M, std::size_t n_tiles, std::size_t m_tiles);
82+
void backward_solve_tiled_matrix(Tiled_matrix &ft_tiles,
83+
Tiled_matrix &ft_rhs,
84+
std::size_t N,
85+
std::size_t M,
86+
std::size_t n_tiles,
87+
std::size_t m_tiles);
8088

8189
/**
8290
* @brief Perform tiled matrix-vector multiplication
8391
*
8492
* @param ft_tiles Tiled matrix represented as a vector of futurized tiles.
8593
* @param ft_vector Tiled vector represented as a vector of futurized tiles.
86-
* @param ft_rhsTiled solution represented as a vector of futurized tiles.
94+
* @param ft_rhs Tiled solution represented as a vector of futurized tiles.
8795
* @param N_row Tile size of first dimension.
8896
* @param N_col Tile size of second dimension.
8997
* @param n_tiles Number of tiles in first dimension.
@@ -92,8 +100,8 @@ void backward_solve_tiled_matrix(
92100
void matrix_vector_tiled(Tiled_matrix &ft_tiles,
93101
Tiled_vector &ft_vector,
94102
Tiled_vector &ft_rhs,
95-
int N_row,
96-
int N_col,
103+
std::size_t N_row,
104+
std::size_t N_col,
97105
std::size_t n_tiles,
98106
std::size_t m_tiles);
99107

@@ -108,7 +116,12 @@ void matrix_vector_tiled(Tiled_matrix &ft_tiles,
108116
* @param m_tiles Number of tiles in second dimension.
109117
*/
110118
void symmetric_matrix_matrix_diagonal_tiled(
111-
Tiled_matrix &ft_tiles, Tiled_vector &ft_vector, int N, int M, std::size_t n_tiles, std::size_t m_tiles);
119+
Tiled_matrix &ft_tiles,
120+
Tiled_vector &ft_vector,
121+
std::size_t N,
122+
std::size_t M,
123+
std::size_t n_tiles,
124+
std::size_t m_tiles);
112125

113126
/**
114127
* @brief Perform tiled symmetric k-rank update (ft_tiles^T * ft_tiles)
@@ -120,18 +133,21 @@ void symmetric_matrix_matrix_diagonal_tiled(
120133
* @param n_tiles Number of tiles in first dimension.
121134
* @param m_tiles Number of tiles in second dimension.
122135
*/
123-
void symmetric_matrix_matrix_tiled(
124-
Tiled_matrix &ft_tiles, Tiled_matrix &ft_result, int N, int M, std::size_t n_tiles, std::size_t m_tiles);
136+
void symmetric_matrix_matrix_tiled(Tiled_matrix &ft_tiles,
137+
Tiled_matrix &ft_result,
138+
std::size_t N,
139+
std::size_t M,
140+
std::size_t n_tiles,
141+
std::size_t m_tiles);
125142

126143
/**
127144
* @brief Compute the difference between two tiled vectors
128145
* @param ft_minuend Tiled vector that is being subtracted from.
129146
* @param ft_subtrahend Tiled vector that is being subtracted.
130-
* @param ft_difference Tiled vector that contains the result of the substraction.
131147
* @param M Tile size dimension.
132148
* @param m_tiles Number of tiles.
133149
*/
134-
void vector_difference_tiled(Tiled_vector &ft_minuend, Tiled_vector &ft_substrahend, int M, std::size_t m_tiles);
150+
void vector_difference_tiled(Tiled_vector &ft_minuend, Tiled_vector &ft_subtrahend, std::size_t M, std::size_t m_tiles);
135151

136152
/**
137153
* @brief Extract the tiled diagonals of a tiled matrix
@@ -140,7 +156,7 @@ void vector_difference_tiled(Tiled_vector &ft_minuend, Tiled_vector &ft_substrah
140156
* @param M Tile size per dimension.
141157
* @param m_tiles Number of tiles per dimension.
142158
*/
143-
void matrix_diagonal_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_vector, int M, std::size_t m_tiles);
159+
void matrix_diagonal_tiled(Tiled_matrix &ft_tiles, Tiled_vector &ft_vector, std::size_t M, std::size_t m_tiles);
144160

145161
/**
146162
* @brief Compute the negative log likelihood loss with a tiled covariance matrix K.
@@ -158,14 +174,14 @@ void compute_loss_tiled(Tiled_matrix &ft_tiles,
158174
Tiled_vector &ft_alpha,
159175
Tiled_vector &ft_y,
160176
hpx::shared_future<double> &loss,
161-
int N,
177+
std::size_t N,
162178
std::size_t n_tiles);
163179

164180
/**
165181
* @brief Updates a hyperparameter of the SEK kernel using Adam
166182
*
167183
* @param ft_invK Tiled inverse of the covariance matrix K represented as a vector of futurized tiles.
168-
* @param ft_grad_param Tiled covariance matrix gradient w.r.t. a hyperparameter.
184+
* @param ft_gradK_param Tiled covariance matrix gradient w.r.t. a hyperparameter.
169185
* @param ft_alpha Tiled vector containing the precomputed inv(K) * y where y is the training output.
170186
* @param adam_params Hyperparameter of the Adam optimizer
171187
* @param sek_params Hyperparameters of the SEK kernel
@@ -180,7 +196,7 @@ void update_hyperparameter_tiled(
180196
const Tiled_vector &ft_alpha,
181197
const AdamParams &adam_params,
182198
SEKParams &sek_params,
183-
int N,
199+
std::size_t N,
184200
std::size_t n_tiles,
185201
std::size_t iter,
186202
std::size_t param_idx);

core/include/gprat/gprat.hpp

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,10 @@ struct GP_data
2727
std::string file_path;
2828

2929
/** @brief Number of samples in the data */
30-
int n_samples;
30+
std::size_t n_samples;
3131

3232
/** @brief Number of GP regressors */
33-
int n_regressors;
33+
std::size_t n_regressors;
3434

3535
/** @brief Vector containing the data */
3636
std::vector<double> data;
@@ -41,10 +41,10 @@ struct GP_data
4141
*
4242
* The file specified by `f_path` must contain `n` samples.
4343
*
44-
* @param f_path Path to the file
44+
* @param file_path Path to the file
4545
* @param n Number of samples
4646
*/
47-
GP_data(const std::string &file_path, int n, int n_reg);
47+
GP_data(const std::string &file_path, std::size_t n, std::size_t n_reg);
4848
};
4949

5050
/**
@@ -64,10 +64,10 @@ class GP
6464
std::vector<double> training_output_;
6565

6666
/** @brief Number of tiles */
67-
int n_tiles_;
67+
std::size_t n_tiles_;
6868

6969
/** @brief Size of each tile in each dimension */
70-
int n_tile_size_;
70+
std::size_t n_tile_size_;
7171

7272
/**
7373
* @brief List of bools indicating trainable parameters: lengthscale,
@@ -82,7 +82,7 @@ class GP
8282

8383
public:
8484
/** @brief Number of regressors */
85-
int n_reg;
85+
std::size_t n_reg;
8686

8787
/**
8888
* @brief Hyperparameters of the squared exponential kernel
@@ -105,10 +105,10 @@ class GP
105105
*/
106106
GP(std::vector<double> input,
107107
std::vector<double> output,
108-
int n_tiles,
109-
int n_tile_size,
110-
int n_regressors,
111-
std::vector<double> kernel_hyperparams,
108+
std::size_t n_tiles,
109+
std::size_t n_tile_size,
110+
std::size_t n_regressors,
111+
const std::vector<double> &kernel_hyperparams,
112112
std::vector<bool> trainable_bool,
113113
std::shared_ptr<Target> target);
114114

@@ -127,10 +127,10 @@ class GP
127127
*/
128128
GP(std::vector<double> input,
129129
std::vector<double> output,
130-
int n_tiles,
131-
int n_tile_size,
132-
int n_regressors,
133-
std::vector<double> kernel_hyperparams,
130+
std::size_t n_tiles,
131+
std::size_t n_tile_size,
132+
std::size_t n_regressors,
133+
const std::vector<double> &kernel_hyperparams,
134134
std::vector<bool> trainable_bool);
135135

136136
/**
@@ -150,10 +150,10 @@ class GP
150150
*/
151151
GP(std::vector<double> input,
152152
std::vector<double> output,
153-
int n_tiles,
154-
int n_tile_size,
155-
int n_regressors,
156-
std::vector<double> kernel_hyperparams,
153+
std::size_t n_tiles,
154+
std::size_t n_tile_size,
155+
std::size_t n_regressors,
156+
const std::vector<double> &kernel_hyperparams,
157157
std::vector<bool> trainable_bool,
158158
int gpu_id,
159159
int n_streams);
@@ -176,14 +176,14 @@ class GP
176176
/**
177177
* @brief Predict output for test input
178178
*/
179-
std::vector<double> predict(const std::vector<double> &test_data, int m_tiles, int m_tile_size);
179+
std::vector<double> predict(const std::vector<double> &test_data, std::size_t m_tiles, std::size_t m_tile_size);
180180

181181
/**
182182
* @brief Predict output for test input and additionally provide
183183
* uncertainty for the predictions.
184184
*/
185185
std::vector<std::vector<double>>
186-
predict_with_uncertainty(const std::vector<double> &test_data, int m_tiles, int m_tile_size);
186+
predict_with_uncertainty(const std::vector<double> &test_data, std::size_t m_tiles, std::size_t m_tile_size);
187187

188188
/**
189189
* @brief Predict output for test input and additionally compute full
@@ -196,7 +196,7 @@ class GP
196196
* @return Full covariance matrix
197197
*/
198198
std::vector<std::vector<double>>
199-
predict_with_full_cov(const std::vector<double> &test_data, int m_tiles, int m_tile_size);
199+
predict_with_full_cov(const std::vector<double> &test_data, std::size_t m_tiles, std::size_t m_tile_size);
200200

201201
/**
202202
* @brief Optimize hyperparameters
@@ -217,7 +217,7 @@ class GP
217217
*
218218
* @return loss
219219
*/
220-
double optimize_step(AdamParams &adam_params, int iter);
220+
double optimize_step(AdamParams &adam_params, std::size_t iter);
221221

222222
/**
223223
* @brief Calculate loss for given data and Gaussian process model

core/include/gprat/utils.hpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,16 @@ GPRAT_NS_BEGIN
2020
* @param n_samples Number of samples
2121
* @param n_tile_size Size of each tile
2222
*/
23-
int compute_train_tiles(int n_samples, int n_tile_size);
23+
std::size_t compute_train_tiles(std::size_t n_samples, std::size_t n_tile_size);
2424

2525
/**
2626
* @brief Compute the number of tiles for training data, given the number of
2727
* samples and the size of each tile.
2828
*
2929
* @param n_samples Number of samples
30-
* @param n_tile_size Size of each tile
30+
* @param n_tiles Number of tiles
3131
*/
32-
int compute_train_tile_size(int n_samples, int n_tiles);
32+
std::size_t compute_train_tile_size(std::size_t n_samples, std::size_t n_tiles);
3333

3434
/**
3535
* @brief Compute the number of test tiles and the size of a test tile.
@@ -41,15 +41,16 @@ int compute_train_tile_size(int n_samples, int n_tiles);
4141
* @param n_tiles Number of tiles
4242
* @param n_tile_size Size of each tile
4343
*/
44-
std::pair<int, int> compute_test_tiles(int n_test, int n_tiles, int n_tile_size);
44+
std::pair<std::size_t, std::size_t>
45+
compute_test_tiles(std::size_t n_test, std::size_t n_tiles, std::size_t n_tile_size);
4546

4647
/**
4748
* @brief Load data from file
4849
*
4950
* @param file_path Path to the file
5051
* @param n_samples Number of samples to load
5152
*/
52-
std::vector<double> load_data(const std::string &file_path, int n_samples, int offset);
53+
std::vector<double> load_data(const std::string &file_path, std::size_t n_samples, std::size_t offset);
5354

5455
/**
5556
* @brief Print a vector

0 commit comments

Comments
 (0)