
Commit f25dba2

tafia authored and AtheMathmo committed
Use more iter_rows and select_rows (#107)
* use iter_rows on GaussianProcess::ker_mat
* use iter_rows on SVM::ker_mat
* use iter_rows on SVM::train
* use select_rows in DBSCAN::expand_cluster
* use iter_rows in NaiveBayes::update_param
* use iter_rows_mut in GaussianMixtureModel::update_param
* use iter_rows in NaiveBayes::get_class
* remove unnecessary &[..]
* use select_rows in SVM::train
* use matrix scalar product in tests
1 parent 2a4694c commit f25dba2
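The common thread across these diffs is replacing manual index arithmetic over a matrix's flat data buffer with row-oriented iteration. A minimal std-only sketch of the pattern, with `chunks(cols)` on a plain `Vec<f64>` standing in for rulinalg's `iter_rows()` (the shape and values are illustrative assumptions):

    fn main() {
        // A 3 x 2 row-major matrix stored as a flat buffer.
        let cols = 2;
        let data: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];

        // Before: slice row i out by hand with index arithmetic.
        let i = 1;
        let row_by_index: &[f64] = &data[i * cols..(i + 1) * cols];

        // After: iterate rows directly; `chunks(cols)` plays the role
        // of `iter_rows()`.
        let row_by_iter: &[f64] = data.chunks(cols).nth(i).unwrap();

        assert_eq!(row_by_index, row_by_iter);
    }

Keeping the stride bookkeeping inside the iterator means call sites can no longer get the `i * cols` arithmetic wrong.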

8 files changed: +31 −54 lines changed


src/learning/dbscan.rs

Lines changed: 2 additions & 5 deletions
@@ -178,11 +178,8 @@ impl DBSCAN {
         let visited = self._visited[*data_point_idx];
         if !visited {
             self._visited[*data_point_idx] = true;
-            let sub_neighbours =
-                self.region_query(&inputs.data()[data_point_idx * inputs.cols()..(data_point_idx +
-                                                                                  1) *
-                                                 inputs.cols()],
-                                  inputs);
+            let sub_neighbours = self.region_query(inputs.select_rows(&[*data_point_idx]).data(),
+                                                   inputs);
 
             if sub_neighbours.len() >= self.min_points {
                 self.expand_cluster(inputs, *data_point_idx, sub_neighbours, cluster);
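`select_rows(&[idx])` copies the requested rows into a new matrix, so the call site no longer spells out the flat-buffer layout. A hypothetical std-only stand-in for what it does (the free function below is illustrative, not rulinalg's implementation):

    // Hypothetical stand-in for Matrix::select_rows on a row-major buffer.
    fn select_rows(data: &[f64], cols: usize, idxs: &[usize]) -> Vec<f64> {
        let mut out = Vec::with_capacity(idxs.len() * cols);
        for &i in idxs {
            out.extend_from_slice(&data[i * cols..(i + 1) * cols]);
        }
        out
    }

    fn main() {
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]; // 3 x 2, row-major
        assert_eq!(select_rows(&data, 2, &[2]), vec![5.0, 6.0]);
    }

Unlike the old slice this allocates a one-row copy, trading a small allocation for a call site that cannot misindex the buffer.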

src/learning/gmm.rs

Lines changed: 3 additions & 3 deletions
@@ -279,9 +279,9 @@ impl GaussianMixtureModel {
 
         let mut new_means = membership_weights.transpose() * inputs;
 
-        for (idx, mean) in new_means.mut_data().chunks_mut(d).enumerate() {
-            for m in mean {
-                *m /= sum_weights[idx];
+        for (mean, w) in new_means.iter_rows_mut().zip(sum_weights.data().iter()) {
+            for m in mean.iter_mut() {
+                *m /= *w;
             }
         }
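The normalization loop now pairs each row with its weight via `zip` instead of indexing `sum_weights[idx]` through `enumerate`. A std-only sketch, with `chunks_mut(cols)` standing in for `iter_rows_mut()` (shapes and values are illustrative):

    fn main() {
        // A 2 x 3 row-major matrix of weighted sums and one weight per row.
        let cols = 3;
        let mut means = vec![2.0, 4.0, 6.0, 3.0, 6.0, 9.0];
        let sum_weights = vec![2.0, 3.0];

        // Walk rows and weights in lockstep and normalize in place.
        for (mean, w) in means.chunks_mut(cols).zip(sum_weights.iter()) {
            for m in mean.iter_mut() {
                *m /= *w;
            }
        }

        assert_eq!(means, vec![1.0, 2.0, 3.0, 1.0, 2.0, 3.0]);
    }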

src/learning/gp.rs

Lines changed: 3 additions & 8 deletions
@@ -122,19 +122,14 @@ impl<T: Kernel, U: MeanFunc> GaussianProcess<T, U> {
     /// Construct a kernel matrix
     fn ker_mat(&self, m1: &Matrix<f64>, m2: &Matrix<f64>) -> Matrix<f64> {
         assert_eq!(m1.cols(), m2.cols());
-        let cols = m1.cols();
 
         let dim1 = m1.rows();
         let dim2 = m2.rows();
 
         let mut ker_data = Vec::with_capacity(dim1 * dim2);
-
-        for i in 0..dim1 {
-            for j in 0..dim2 {
-                ker_data.push(self.ker.kernel(&m1.data()[i * cols..(i + 1) * cols],
-                                              &m2.data()[j * cols..(j + 1) * cols]));
-            }
-        }
+        ker_data.extend(
+            m1.iter_rows().flat_map(|row1| m2.iter_rows()
+                .map(move |row2| self.ker.kernel(row1, row2))));
 
         Matrix::new(dim1, dim2, ker_data)
     }
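The nested index loops become a `flat_map` over row pairs: every row of m1 is paired with every row of m2, yielding the dim1 * dim2 kernel entries in row-major order. A std-only sketch with a linear kernel standing in for `self.ker.kernel` (names and values are illustrative):

    // A linear kernel standing in for self.ker.kernel.
    fn kernel(a: &[f64], b: &[f64]) -> f64 {
        a.iter().zip(b).map(|(x, y)| x * y).sum()
    }

    fn main() {
        let cols = 2;
        let m1 = vec![1.0, 0.0, 0.0, 1.0];           // 2 x 2, row-major
        let m2 = vec![1.0, 1.0, 2.0, 0.0, 0.0, 3.0]; // 3 x 2, row-major

        // Pair every row of m1 with every row of m2, row-major order.
        let ker_data: Vec<f64> = m1.chunks(cols)
            .flat_map(|row1| m2.chunks(cols).map(move |row2| kernel(row1, row2)))
            .collect();

        assert_eq!(ker_data, vec![1.0, 2.0, 0.0, 1.0, 0.0, 3.0]);
    }

The `move` on the inner closure copies the `row1` slice reference in, which is what lets each inner `map` outlive its `flat_map` iteration.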

src/learning/naive_bayes.rs

Lines changed: 5 additions & 7 deletions
@@ -151,7 +151,7 @@ impl<T: Distribution> NaiveBayes<T> {
         self.class_counts = vec![0; class_count];
         let mut class_data = vec![Vec::new(); class_count];
 
-        for (idx, row) in targets.data().chunks(class_count).enumerate() {
+        for (idx, row) in targets.iter_rows().enumerate() {
             // Find the class of this input
             let class = NaiveBayes::<T>::find_class(row);
 
@@ -195,15 +195,13 @@ impl<T: Distribution> NaiveBayes<T> {
     }
 
     fn get_classes(log_probs: Matrix<f64>) -> Vec<usize> {
-        let class_count = log_probs.cols();
         let mut data_classes = Vec::with_capacity(log_probs.rows());
 
-        // Argmax each class log-probability per input
-        for row in log_probs.data().chunks(class_count) {
+        data_classes.extend(log_probs.iter_rows().map(|row| {
+            // Argmax each class log-probability per input
             let (class, _) = utils::argmax(row);
-
-            data_classes.push(class);
-        }
+            class
+        }));
 
         data_classes
     }
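Collecting the argmax of each row with `extend` plus `map` replaces the push-in-a-loop form. A std-only sketch, with a tiny `argmax` standing in for `utils::argmax` (values are illustrative):

    // Index of the largest value in a slice; stands in for utils::argmax.
    fn argmax(row: &[f64]) -> usize {
        let mut best = 0;
        for (i, v) in row.iter().enumerate() {
            if *v > row[best] {
                best = i;
            }
        }
        best
    }

    fn main() {
        // 3 x 2 per-class log-probabilities, row-major.
        let cols = 2;
        let log_probs = vec![-0.1, -2.3, -1.7, -0.4, -0.9, -0.2];

        let mut data_classes = Vec::with_capacity(log_probs.len() / cols);
        data_classes.extend(log_probs.chunks(cols).map(argmax));

        assert_eq!(data_classes, vec![0, 1, 1]);
    }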

src/learning/optim/fmincg.rs

Lines changed: 3 additions & 3 deletions
@@ -115,7 +115,7 @@ impl<M: Optimizable> OptimAlgorithm<M> for ConjugateGD {
 
             x = x + &s * z1;
 
-            let cost = model.compute_grad(&x.data()[..], inputs, targets);
+            let cost = model.compute_grad(x.data(), inputs, targets);
             f2 = cost.0;
             df2 = Vector::new(cost.1);
 
@@ -167,7 +167,7 @@ impl<M: Optimizable> OptimAlgorithm<M> for ConjugateGD {
 
             z1 += z2;
             x = x + &s * z2;
-            let cost_grad = model.compute_grad(&x.data()[..], inputs, targets);
+            let cost_grad = model.compute_grad(x.data(), inputs, targets);
             f2 = cost_grad.0;
             df2 = Vector::new(cost_grad.1);
 
@@ -215,7 +215,7 @@ impl<M: Optimizable> OptimAlgorithm<M> for ConjugateGD {
             z1 += z2;
             x = x + &s * z2;
 
-            let cost_grad = model.compute_grad(&x.data()[..], inputs, targets);
+            let cost_grad = model.compute_grad(x.data(), inputs, targets);
             f2 = cost_grad.0;
             df2 = Vector::new(cost_grad.1);
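This change, repeated in grad_desc.rs below, works because `data()` returns a borrowed `Vec<f64>`, which deref-coerces to `&[f64]` wherever a slice parameter is expected, making the explicit `[..]` reslice redundant. A std-only sketch of the coercion:

    fn cost(params: &[f64]) -> f64 {
        params.iter().map(|p| p * p).sum()
    }

    fn main() {
        let x: Vec<f64> = vec![1.0, 2.0];
        // Before: explicit reslice. After: deref coercion does the same.
        assert_eq!(cost(&x[..]), cost(&x));
    }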

src/learning/optim/grad_desc.rs

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ impl<M: Optimizable> OptimAlgorithm<M> for GradientDesc {
 
         for _ in 0..self.iters {
             // Compute the cost and gradient for the current parameters
-            let (cost, grad) = model.compute_grad(&optimizing_val.data()[..], inputs, targets);
+            let (cost, grad) = model.compute_grad(optimizing_val.data(), inputs, targets);
 
             // Early stopping
             if (start_iter_cost - cost).abs() < LEARNING_EPS {

src/learning/svm.rs

Lines changed: 7 additions & 15 deletions
@@ -103,19 +103,15 @@ impl<K: Kernel> SVM<K> {
     /// Construct a kernel matrix
     fn ker_mat(&self, m1: &Matrix<f64>, m2: &Matrix<f64>) -> Matrix<f64> {
         assert_eq!(m1.cols(), m2.cols());
-        let cols = m1.cols();
 
         let dim1 = m1.rows();
         let dim2 = m2.rows();
 
         let mut ker_data = Vec::with_capacity(dim1 * dim2);
 
-        for i in 0..dim1 {
-            for j in 0..dim2 {
-                ker_data.push(self.ker.kernel(&m1.data()[i * cols..(i + 1) * cols],
-                                              &m2.data()[j * cols..(j + 1) * cols]));
-            }
-        }
+        ker_data.extend(
+            m1.iter_rows().flat_map(|row1| m2.iter_rows()
+                .map(move |row2| self.ker.kernel(row1, row2))));
 
         Matrix::new(dim1, dim2, ker_data)
     }
@@ -150,17 +146,13 @@ impl<K: Kernel> SupModel<Matrix<f64>, Vector<f64>> for SVM<K> {
 
         let ones = Matrix::<f64>::ones(inputs.rows(), 1);
         let full_inputs = ones.hcat(inputs);
-        let m = full_inputs.cols();
 
         for t in 0..self.optim_iters {
            let i = rng.gen_range(0, n);
-            let mut sum = 0f64;
-            for j in 0..n {
-                sum += alpha[j] * targets[j] *
-                       self.ker.kernel(&full_inputs.data()[i * m..(i + 1) * m],
-                                       &full_inputs.data()[j * m..(j + 1) * m]);
-            }
-            sum *= targets[i] / (self.lambda * (t as f64));
+            let row_i = full_inputs.select_rows(&[i]);
+            let sum = full_inputs.iter_rows()
+                .fold(0f64, |sum, row| sum + self.ker.kernel(row_i.data(), row)) *
+                targets[i] / (self.lambda * (t as f64));
 
             if sum < 1f64 {
                 alpha[i] += 1f64;
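The training loop now grabs row i once with `select_rows` and accumulates the kernel sum with `fold` instead of a mutable accumulator and an index variable. A std-only sketch of the fold shape, with a linear kernel and small illustrative values:

    // A linear kernel standing in for self.ker.kernel.
    fn kernel(a: &[f64], b: &[f64]) -> f64 {
        a.iter().zip(b).map(|(x, y)| x * y).sum()
    }

    fn main() {
        let cols = 2;
        let full_inputs = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]; // 3 x 2
        let i = 0;
        let row_i = &full_inputs[i * cols..(i + 1) * cols];

        // Sum the kernel of row i against every row.
        let sum = full_inputs.chunks(cols)
            .fold(0f64, |sum, row| sum + kernel(row_i, row));

        // [1,2]·[1,2] + [1,2]·[3,4] + [1,2]·[5,6] = 5 + 11 + 17
        assert_eq!(sum, 33.0);
    }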

src/learning/toolkit/regularization.rs

Lines changed: 7 additions & 12 deletions
@@ -141,11 +141,8 @@ mod tests {
 
         assert!((a - (input_mat.norm() / 12f64)) < 1e-18);
 
-        let true_grad = input_mat.data()
-            .iter()
-            .map(|x| x / 6f64)
-            .collect::<Vec<_>>();
-        for eps in (b - Matrix::new(3, 4, true_grad)).into_vec() {
+        let true_grad = &input_mat / 6f64;
+        for eps in (b - true_grad).into_vec() {
             assert!(eps < 1e-18);
         }
     }
@@ -162,16 +159,14 @@ mod tests {
 
         assert!(a - ((input_mat.norm() / 24f64) + (42f64 / 12f64)) < 1e-18);
 
-        let l1_true_grad = vec![-1., -1., -1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
+        let l1_true_grad = Matrix::new(3, 4,
+                                       vec![-1., -1., -1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
             .into_iter()
             .map(|x| x / 12f64)
-            .collect::<Vec<_>>();
-        let l2_true_grad = input_mat.data()
-            .iter()
-            .map(|x| x / 12f64)
-            .collect::<Vec<_>>();
+            .collect::<Vec<_>>());
+        let l2_true_grad = &input_mat / 12f64;
 
-        for eps in (b - Matrix::new(3, 4, l1_true_grad) - Matrix::new(3, 4, l2_true_grad))
+        for eps in (b - l1_true_grad - l2_true_grad)
             .into_vec() {
             // Slightly lower boundary than others - more numerical error as more ops.
             assert!(eps < 1e-12);
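The test cleanup leans on matrix scalar operators (`&input_mat / 6f64` divides every element, as the diff above shows) instead of mapping over the raw data and rebuilding a matrix. The equivalent before/after on plain vectors (std-only sketch, illustrative values):

    fn main() {
        let input: Vec<f64> = vec![3.0, 6.0, 9.0];

        // Before: map over the raw data and re-collect.
        let before: Vec<f64> = input.iter().map(|x| x / 3.0).collect();

        // After: with matrices, `&input_mat / 3.0` scales elementwise;
        // here an in-place loop plays that role.
        let mut after = input.clone();
        for x in after.iter_mut() {
            *x /= 3.0;
        }

        assert_eq!(before, after);
    }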
