@@ -1,7 +1,6 @@
 from __future__ import division

 from builtins import str, range, object
-from past.utils import old_div

 import autograd.numpy as np
 from ._utils import outer_rows
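
Reviewer note: `past.utils.old_div` reproduces Python 2's `/` operator, i.e. floor division when both operands are ints and true division otherwise, so each removed call below must map to `/` or `//` depending on the operand types at that site. A minimal sketch of the semantics being replaced (the helper name is mine, for illustration only):

    def old_div_semantics(a, b):
        # old_div mimics Python 2's "/": floor division for two ints,
        # true division as soon as either operand is a float
        if isinstance(a, int) and isinstance(b, int):
            return a // b
        return a / b

    assert old_div_semantics(7, 2) == 3      # int, int -> floor
    assert old_div_semantics(7.0, 2) == 3.5  # any float -> true division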
@@ -122,7 +121,7 @@ def simulate(self, gof, fea_tensor=None):
         Tau = fea_tensor.reshape(n, -1)
         # Make sure it is a matrix i.e, np.cov returns a scalar when Tau is
         # 1d.
-        cov = old_div(Tau.T.dot(Tau), n) + np.zeros((1, 1))
+        cov = Tau.T.dot(Tau) / n + np.zeros((1, 1))
         n_simulate = self.n_simulate

         arr_nfssd, eigs = list_simulate_spectral(cov, J, n_simulate, seed=self.seed)
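
Aside on the unchanged `+ np.zeros((1, 1))` (not part of this commit): broadcasting against a 1 x 1 array promotes a scalar result to a 2-D matrix while leaving larger arrays alone, which is what the comment about `np.cov` returning a scalar refers to. A quick check:

    import numpy as np

    # a scalar broadcasts up to a 1 x 1 matrix ...
    assert (np.float64(3.0) + np.zeros((1, 1))).shape == (1, 1)
    # ... while a proper matrix keeps its shape
    assert (np.ones((4, 4)) + np.zeros((1, 1))).shape == (4, 4)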
@@ -310,7 +309,7 @@ def feature_tensor(self, X):
         # n x d x J tensor
         grad_logp_K = outer_rows(grad_logp, K)

-        Xi = old_div((grad_logp_K + dKdV), np.sqrt(d * J))
+        Xi = (grad_logp_K + dKdV) / np.sqrt(d * J)
         return Xi


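`outer_rows` is not shown in this diff; from the surrounding shapes (`grad_logp` presumably n x d, `K` presumably n x J, and the result the n x d x J tensor noted in the comment) it should be a row-wise outer product. A sketch consistent with that reading, not the module's actual implementation:

    import numpy as np

    def outer_rows_sketch(A, B):
        # For A (n x d) and B (n x J), return T (n x d x J) with
        # T[i] = np.outer(A[i], B[i])
        return A[:, :, np.newaxis] * B[:, np.newaxis, :]
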
@@ -340,9 +339,9 @@ def power_criterion(p, X, k, test_locs, reg=1e-2, use_unbiased=True, use_2terms=

     # mean/sd criterion
     sigma_h1 = np.sqrt(u_variance + reg)
-    ratio = old_div(u_mean, sigma_h1)
+    ratio = u_mean / sigma_h1
     if use_2terms:
-        obj = old_div(-1.0, (np.sqrt(n) * sigma_h1)) + np.sqrt(n) * ratio
+        obj = -1.0 / (np.sqrt(n) * sigma_h1) + np.sqrt(n) * ratio
     else:
         obj = ratio
     return obj
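
My reading of the two-term objective (an assumption, in line with the FSSD power approximation of Jitkrittum et al., 2017, if this is that code): the dominant term is sqrt(n) times the mean/sd ratio, and `-1.0 / (np.sqrt(n) * sigma_h1)` accounts for the rejection threshold shrinking with n. A toy check that the criterion behaves as intended:

    import numpy as np

    def two_term_obj(u_mean, u_variance, n, reg=1e-2):
        sigma_h1 = np.sqrt(u_variance + reg)
        return -1.0 / (np.sqrt(n) * sigma_h1) + np.sqrt(n) * u_mean / sigma_h1

    # a larger mean under H1 yields a larger criterion at fixed variance
    assert two_term_obj(0.5, 1.0, 400) > two_term_obj(0.1, 1.0, 400)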
@@ -372,8 +371,8 @@ def ustat_h1_mean_variance(fea_tensor, return_variance=True, use_unbiased=True):
     # n x d*J
     Tau = np.reshape(Xi, [n, d * J])
     if use_unbiased:
-        t1 = np.sum(np.mean(Tau, 0) ** 2) * (old_div(n, float(n - 1)))
-        t2 = old_div(np.sum(np.mean(Tau**2, 0)), float(n - 1))
+        t1 = np.sum(np.mean(Tau, 0) ** 2) * (n / float(n - 1))
+        t2 = np.sum(np.mean(Tau**2, 0)) / float(n - 1)
         # stat is the mean
         stat = t1 - t2
     else:
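
The unbiased branch is algebra for the off-diagonal mean of the Gram matrix Tau Tau^T, i.e. the U-statistic estimate of the mean; `n / float(n - 1)` is deliberately a float ratio, so `/` (not `//`) is the right replacement here. A standalone numerical check of that identity:

    import numpy as np

    rng = np.random.default_rng(0)
    n, dJ = 50, 6
    Tau = rng.standard_normal((n, dJ))

    t1 = np.sum(np.mean(Tau, 0) ** 2) * (n / float(n - 1))
    t2 = np.sum(np.mean(Tau**2, 0)) / float(n - 1)

    # off-diagonal mean of the Gram matrix matches t1 - t2
    G = Tau.dot(Tau.T)
    off_diag_mean = (G.sum() - np.trace(G)) / (n * (n - 1))
    assert np.isclose(t1 - t2, off_diag_mean)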
@@ -425,20 +424,16 @@ def simulate_null_dist(eigs, J, n_simulate=2000, seed=7):
     -------
     fssds : a numpy array of simulated statistics.
     """
-    d = old_div(len(eigs), J)
+    d = len(eigs) // J  # Use integer division
     assert d > 0
-    # draw at most d x J x block_size values at a time
-    block_size = max(20, int(old_div(1000.0, (d * J))))
+    block_size = max(20, int(1000.0 / (d * J)))
     fssds = np.zeros(n_simulate)
     from_ind = 0
     rng = default_rng(seed)
     while from_ind < n_simulate:
         to_draw = min(block_size, n_simulate - from_ind)
-        # draw chi^2 random variables.
-        chi2 = rng.standard_normal(size=(d * J, to_draw)) ** 2
-        # an array of length to_draw
+        chi2 = rng.standard_normal(size=(d * J, to_draw)) ** 2  # d * J is now an integer
         sim_fssds = eigs.dot(chi2 - 1.0)
-        # store
         end_ind = from_ind + to_draw
         fssds[from_ind:end_ind] = sim_fssds
         from_ind = end_ind
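
With `d` now an int, `size=(d * J, to_draw)` is valid for `standard_normal`, which is presumably why this site switched to `//` (NumPy's Generator rejects float sizes). For intuition, the blocked loop is equivalent to the one-shot simulation below of sum_i eigs[i] * (Z_i^2 - 1); blocking only caps how many normals are held in memory at once:

    import numpy as np

    def simulate_null_dist_oneshot(eigs, n_simulate=2000, seed=7):
        # each column of chi2 - 1 is one draw of centered chi-square
        # variables; eigs.dot(...) forms the weighted sum per draw
        rng = np.random.default_rng(seed)
        chi2 = rng.standard_normal(size=(len(eigs), n_simulate)) ** 2
        return eigs.dot(chi2 - 1.0)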