# res_reg_lmnt_awikner.reservoir_train_test

   1from numba.core.errors import NumbaPerformanceWarning
   2import warnings
   3import ray
   4import sys
   5import os
   6
   7from scipy.linalg import solve, solve_sylvester
   8from scipy.sparse.linalg import eigsh
   9from numba import njit
  10from numba.typed import List
  11import time
  12
  13from res_reg_lmnt_awikner.lorenzrungekutta_numba import lorenzrungekutta, lorenzrungekutta_pred
  14from res_reg_lmnt_awikner.ks_etdrk4 import kursiv_predict, kursiv_predict_pred
  15from res_reg_lmnt_awikner.csc_mult import *
  16from res_reg_lmnt_awikner.helpers import get_windows_path, poincare_max
  17from res_reg_lmnt_awikner.classes import RunOpts, NumericalModel, Reservoir, ResOutput
  18
  19warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
  20
  21
  22@njit
  23def str_to_int(s):
  24    # Converts a string to an int in numba compiled functions
  25    final_index, result = len(s) - 1, 0
  26    for i, v in enumerate(s):
  27        result += (ord(v) - 48) * (10 ** (final_index - i))
  28    return result
  29
  30
  31@njit(fastmath=True)
  32def mean_numba_axis1(mat):
  33    # Computes the mean over axis 1 in numba compiled functions
  34    res = np.zeros(mat.shape[0])
  35    for i in range(mat.shape[0]):
  36        res[i] = np.mean(mat[i])
  37
  38    return res
  39
  40
  41@njit(fastmath=True)
  42def sum_numba_axis0(mat):
  43    # Computes the sum over axis 0 in numba compiled functions
  44    res = np.zeros(mat.shape[1])
  45    for i in range(mat.shape[1]):
  46        res[i] = np.sum(mat[:, i])
  47    return res
  48
  49
  50@njit(fastmath=True)
  51def numba_var_axis0(pred):
  52    mean_all = sum_numba_axis0(pred) / pred.shape[0]
  53    variances_all = sum_numba_axis0((pred - mean_all) ** 2.0) / (pred.shape[0])
  54    return variances_all
  55
  56
@njit(fastmath=True)
def wasserstein_distance_empirical(measured_samples, true_samples):
    # Computes the wasserstein distance between the empirical CDFs of the two input
    # sets of samples. Faster than scipy when compiled.
    #
    # NOTE: both input arrays are sorted *in place*, so the caller's arrays are mutated.
    # NOTE(review): NaN/inf are screened only in measured_samples, not true_samples —
    # confirm that true_samples is always finite at the call sites.
    if np.any(np.isnan(measured_samples)):
        return np.NAN
    if np.any(np.isinf(measured_samples)):
        return np.inf
    measured_samples.sort()
    true_samples.sort()
    # n / m: sample counts; n_inv / m_inv: per-sample CDF increments.
    n, m, n_inv, m_inv = (measured_samples.size, true_samples.size,
                          1 / measured_samples.size, 1 / true_samples.size)
    n_itr = 0         # index into sorted measured_samples
    m_itr = 0         # index into sorted true_samples
    measured_cdf = 0  # empirical CDF of measured samples at prev_sample
    true_cdf = 0      # empirical CDF of true samples at prev_sample
    wass_dist = 0     # accumulated integral of |F_measured - F_true|
    # Initialization: step past the smallest sample value (no area accumulated yet,
    # since both CDFs are 0 to its left).
    if measured_samples[n_itr] < true_samples[m_itr]:
        prev_sample = measured_samples[n_itr]
        measured_cdf += n_inv
        n_itr += 1
    elif true_samples[m_itr] < measured_samples[n_itr]:
        prev_sample = true_samples[m_itr]
        true_cdf += m_inv
        m_itr += 1
    else:
        # Tie: advance both iterators.
        prev_sample = true_samples[m_itr]
        measured_cdf += n_inv
        true_cdf += m_inv
        n_itr += 1
        m_itr += 1
    # Merge walk over both sorted arrays: for each interval between consecutive
    # sample values, accumulate |CDF difference| * interval width.
    while n_itr < n and m_itr < m:
        if measured_samples[n_itr] < true_samples[m_itr]:
            wass_dist += np.abs((measured_cdf - true_cdf)
                                * (measured_samples[n_itr] - prev_sample))
            prev_sample = measured_samples[n_itr]
            measured_cdf += n_inv
            n_itr += 1
        elif true_samples[m_itr] < measured_samples[n_itr]:
            wass_dist += np.abs((measured_cdf - true_cdf)
                                * (true_samples[m_itr] - prev_sample))
            prev_sample = true_samples[m_itr]
            true_cdf += m_inv
            m_itr += 1
        else:
            # Equal values: accumulate once, advance both CDFs.
            wass_dist += np.abs((measured_cdf - true_cdf)
                                * (true_samples[m_itr] - prev_sample))
            prev_sample = true_samples[m_itr]
            measured_cdf += n_inv
            true_cdf += m_inv
            n_itr += 1
            m_itr += 1
    # Tail: one array is exhausted, so its CDF is 1; integrate the remainder of
    # the other array against that constant.
    if n_itr == n:
        for itr in range(m_itr, m):
            wass_dist += np.abs((1.0 - true_cdf) *
                                (true_samples[itr] - prev_sample))
            prev_sample = true_samples[itr]
            true_cdf += m_inv
    else:
        for itr in range(n_itr, n):
            wass_dist += np.abs((measured_cdf - 1.0) *
                                (measured_samples[itr] - prev_sample))
            prev_sample = measured_samples[itr]
            measured_cdf += n_inv

    return wass_dist
 122
 123
@jit(fastmath=True, nopython=True)
def numba_eigsh(A):
    # Computes 6 eigenvalues of the symmetric matrix/operator A from inside
    # numba-compiled code by dropping to object mode for scipy's ARPACK wrapper.
    # Fixed seed so the ARPACK starting vector v0 is deterministic across calls.
    np.random.seed(0)
    v0 = np.random.rand(A.shape[0])
    # eigsh is not numba-compilable; objmode declares the return type so the
    # surrounding nopython code can consume the result.
    with objmode(eigs_out='double[:]'):
        eigs_out = eigsh(A, k=6, v0=v0, maxiter=1e5, return_eigenvectors=False)
    return eigs_out
 131
 132
@jit(nopython=True, fastmath=True)
def numerical_model_wrapped(h=0.01, tau=0.1, T=300, ttsplit=5000, u0=0, system='lorenz',
                            params=np.array([[], []], dtype=np.complex128)):
    # Numba function for obtaining training and testing dynamical system time series data.
    # h: integrator step, tau: sampling interval, T: duration, ttsplit: train/test
    # split index (in samples), u0: initial condition, system: which model to run.
    # Returns (train array, test array, split index, model params); the split sample
    # appears in both train and test arrays.
    if system == 'lorenz':
        int_step = int(tau / h)  # integrator sub-steps per output sample
        u_arr = np.ascontiguousarray(lorenzrungekutta(u0, T, tau, int_step))

        # Normalize each coordinate; the constants are presumably precomputed
        # long-run mean/std of the Lorenz attractor — TODO confirm against the
        # data-generation scripts.
        u_arr[0] = (u_arr[0] - 0) / 7.929788629895004
        u_arr[1] = (u_arr[1] - 0) / 8.9932616136662
        u_arr[2] = (u_arr[2] - 23.596294463016896) / 8.575917849311919
        new_params = params
    elif system == 'KS':
        u_arr, new_params = kursiv_predict(u0, tau=tau, T=T, params=params)
        # Rescale by a fixed constant (presumably the KS solution's std — verify).
        u_arr = np.ascontiguousarray(u_arr) / (1.1876770355823614)
    elif system == 'KS_d2175':
        # Kuramoto-Sivashinsky with domain size d = 21.75.
        u_arr, new_params = kursiv_predict(u0, tau=tau, T=T, d=21.75, params=params)
        u_arr = np.ascontiguousarray(u_arr) / (1.2146066380280796)
    else:
        raise ValueError

    u_arr_train = u_arr[:, :ttsplit + 1]
    # u[ttsplit], the (ttsplit+1)st element, is the last in u_arr_train and the first in u_arr_test
    u_arr_test = u_arr[:, ttsplit:]
    return u_arr_train, u_arr_test, ttsplit, new_params
 158
 159
@jit(nopython=True, fastmath=True)
def numerical_model_wrapped_pred(h=0.01, tau=0.1, T=300, ttsplit=5000, u0_array=np.array([[], []], dtype=np.complex128),
                                 system='lorenz', params=np.array([[], []], dtype=np.complex128)):
    # Numba function for obtaining training and testing dynamical system time series data for a set of initial conditions.
    # This is used during test to compute the map error instead of a for loop over the entire prediction period.
    # Same normalization and return convention as numerical_model_wrapped, but each
    # column of u0_array is propagated independently.
    if system == 'lorenz':
        int_step = int(tau / h)  # integrator sub-steps per output sample
        u_arr = np.ascontiguousarray(
            lorenzrungekutta_pred(u0_array, tau, int_step))

        # Same fixed normalization constants as numerical_model_wrapped
        # (presumably attractor mean/std — TODO confirm).
        u_arr[0] = (u_arr[0] - 0) / 7.929788629895004
        u_arr[1] = (u_arr[1] - 0) / 8.9932616136662
        u_arr[2] = (u_arr[2] - 23.596294463016896) / 8.575917849311919
        new_params = params
    elif system == 'KS':
        u_arr, new_params = kursiv_predict_pred(
            u0_array, tau=tau, T=T, params=params)
        u_arr = np.ascontiguousarray(u_arr) / (1.1876770355823614)
    elif system == 'KS_d2175':
        u_arr, new_params = kursiv_predict_pred(u0_array, tau=tau, T=T, params=params, d=21.75)
        u_arr = np.ascontiguousarray(u_arr) / (1.2146066380280796)
    else:
        raise ValueError

    u_arr_train = u_arr[:, :ttsplit + 1]
    # u[ttsplit], the (ttsplit+1)st element, is the last in u_arr_train and the first in u_arr_test
    u_arr_test = u_arr[:, ttsplit:]
    return u_arr_train, u_arr_test, ttsplit, new_params
 188
 189
 190def getX(res, rk, x0=1, y0=1, z0=1):
 191    # Function to obtain reservoir states when in python interpreter. Calls get_X_wrapped.
 192    u_training = rk.u_arr_train
 193    res.X = get_X_wrapped(np.ascontiguousarray(u_training), res.X, res.Win_data, res.Win_indices, res.Win_indptr,
 194                          res.Win_shape,
 195                          res.W_data, res.W_indices, res.W_indptr, res.W_shape, res.leakage)
 196
 197    return res.X
 198
 199
@jit(nopython=True, fastmath=True)
def get_X_wrapped(u_training, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
                  leakage, noise, noisetype='none', noise_scaling=0, noise_realization=0, traintype='normal'):
    # Numba compatible function for obtaining reservoir states using various types of noise.
    # Generally returns an array of reservoir states and the training data used as input
    # (with noise included when the noise is applied to the input).
    # If traintype is 'sylvester_wD' or contains 'gradient'/'regzero', this function instead
    # returns the reservoir states and the reservoir state derivatives.
    #
    # Every branch iterates the leaky-tanh reservoir update
    #   r[i+1] = (1 - leakage) * r[i] + leakage * tanh(Win @ [1; u[i]] + W @ r[i]),
    # with Win and W in CSC form (data/indices/indptr/shape) applied via mult_vec;
    # the branches differ only in where the noise term is injected.

    if noisetype in ['gaussian', 'perturbation']:
        # Noise applied at every time step of the rollout.
        if traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq'] or 'confined' in traintype:
            # Noise added to the input u[i]; the noisy input series is also returned.
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i] + noise[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, res_X[:, i]))
            u_training_wnoise = u_training + noise
        elif traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
            # Noise added to the reservoir state fed into W; inputs stay noiseless.
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, res_X[:, i] + noise[:, i]))
            u_training_wnoise = u_training
        elif traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
            # Noise added to the pre-activation (after the W multiply); inputs stay noiseless.
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, res_X[:, i]) + noise[:, i])
            u_training_wnoise = u_training
        else:
            raise ValueError
        return res_X, u_training_wnoise
    elif noisetype not in ['gaussian_onestep', 'perturbation_onestep'] and \
            ('gaussian' in noisetype and 'step' in noisetype):
        # Multi-step noise, e.g. 'gaussian3step': each stored state is obtained by
        # applying noise_steps consecutive noisy updates starting from the
        # noiseless reference trajectory.
        noise_steps = str_to_int(noisetype.replace('gaussian', '').replace(
            'perturbation', '').replace('step', ''))
        # First compute the noiseless reference trajectory.
        res_X_nonoise = np.copy(res_X)
        for i in range(0, u_training[0].size):
            res_X_nonoise[:, i + 1] = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                         np.append(1., u_training[:, i])) +
                mult_vec(W_data, W_indices, W_indptr, W_shape, res_X_nonoise[:, i]))
        if traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq']:
            # Input noise on each of the noise_steps updates.
            for i in range(0, u_training[0].size - noise_steps):
                temp_x = res_X_nonoise[:, i]
                temp_x = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i] + noise[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
                for k in range(1, noise_steps):
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                                 np.append(1., u_training[:, i + k] + noise[:, i + k])) +
                        mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
                res_X[:, i + noise_steps] = temp_x
            u_training_wnoise = u_training + noise
        elif traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
            # State noise (added to r before the W multiply) on each noisy update.
            for i in range(0, u_training[0].size - noise_steps):
                temp_x = res_X_nonoise[:, i]
                temp_x = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x + noise[:, i]))
                for k in range(1, noise_steps):
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                                 np.append(1., u_training[:, i + k])) +
                        mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x + noise[:, i + k]))
                res_X[:, i + noise_steps] = temp_x
            u_training_wnoise = u_training
        elif traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
            # Pre-activation noise on each noisy update.
            for i in range(0, u_training[0].size - noise_steps):
                temp_x = res_X_nonoise[:, i]
                temp_x = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x) + noise[:, i])
                for k in range(1, noise_steps):
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                                 np.append(1., u_training[:, i + k])) +
                        mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x) + noise[:, i + k])
                res_X[:, i + noise_steps] = temp_x
            u_training_wnoise = u_training
        else:
            raise ValueError
        return res_X, u_training_wnoise
    elif noisetype in ['gaussian_onestep', 'perturbation_onestep']:
        # Single-step noise: each stored state is one noisy update away from the
        # noiseless trajectory tracked in temp_x.
        if traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq']:
            temp_x = res_X[:, 0]
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * temp_x + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i] + noise[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
                # Advance the noiseless reference state.
                # NOTE(review): this update omits the (1 - leakage) * temp_x blend used
                # everywhere else — confirm the pure-tanh update is intentional.
                temp_x = np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(1., u_training[:, i])) + mult_vec(
                        W_data, W_indices, W_indptr, W_shape, temp_x))
            u_training_wnoise = u_training + noise
        elif traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
            temp_x = res_X[:, 0]
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * temp_x + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x + noise[:, i]))
                # Noiseless reference update (same NOTE(review) as above).
                temp_x = np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(1., u_training[:, i])) + mult_vec(
                        W_data, W_indices, W_indptr, W_shape, temp_x))
            u_training_wnoise = u_training
        elif traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
            temp_x = res_X[:, 0]
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * temp_x + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                             np.append(1., u_training[:, i])) +
                    mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x) + noise[:, i])
                # Noiseless reference update (same NOTE(review) as above).
                temp_x = np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(1., u_training[:, i])) + mult_vec(
                        W_data, W_indices, W_indptr, W_shape, temp_x))
            u_training_wnoise = u_training
        else:
            raise ValueError
        return res_X, u_training_wnoise
    elif 'gaussian' in noisetype:
        # e.g. 'gaussianN' (no 'step' suffix): noise injected only on the *first*
        # of noise_steps updates away from the noiseless trajectory.
        noise_steps = str_to_int(noisetype.replace('gaussian', ''))

        # Noiseless reference trajectory.
        res_X_nonoise = np.copy(res_X)
        for i in range(0, u_training[0].size):
            res_X_nonoise[:, i + 1] = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                         np.append(1., u_training[:, i])) +
                mult_vec(W_data, W_indices, W_indptr, W_shape, res_X_nonoise[:, i]))
        for i in range(0, u_training[0].size - noise_steps):
            temp_x = res_X_nonoise[:, i]
            for k in range(noise_steps):
                if k == 0:
                    # Input noise only on the first step.
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                                 np.append(1., u_training[:, i + k] + noise[:, i + k])) +
                        mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
                else:
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                                 np.append(1., u_training[:, i + k])) +
                        mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
            res_X[:, i + noise_steps] = temp_x
        u_training_wnoise = u_training + noise
        return res_X, u_training_wnoise

    elif traintype in ['sylvester_wD'] or 'gradient' in traintype or 'regzero' in traintype:
        # Noiseless run that additionally records the state derivative factor
        # leakage * sech^2(pre-activation), used by gradient/Sylvester regularization.
        rsvr_size = res_X.shape[0]
        res_D = np.zeros((rsvr_size, u_training.shape[1] + 1))
        for i in range(0, u_training[0].size):
            res_internal = mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                                    np.append(1., u_training[:, i])) + mult_vec(
                W_data, W_indices, W_indptr, W_shape, res_X[:, i])
            res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + \
                              leakage * np.tanh(res_internal)
            # d/dx [leakage * tanh(x)] = leakage / cosh(x)^2, evaluated at the pre-activation.
            res_D[:, i + 1] = leakage / (np.power(np.cosh(res_internal), 2.0))

        return res_X, res_D
    else:
        # Default noiseless rollout ('none'); returns the clean training data.
        for i in range(0, u_training[0].size):
            res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                         np.append(1., u_training[:, i])) +
                mult_vec(W_data, W_indices, W_indptr, W_shape, res_X[:, i]))
        return res_X, u_training
 370
 371
 372def gen_noise_driver(run_opts, data_shape, res_shape, noise_scaling, noise_stream):
 373    # Generates an array of noise states for a given noise type, number of noise realizations, and array of random number generators
 374    if run_opts.noisetype in ['gaussian', 'perturbation']:
 375        if run_opts.traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean',
 376                                  'rq'] or 'confined' in run_opts.traintype:
 377            noise = gen_noise(data_shape[0], data_shape[1], run_opts.noisetype,
 378                              noise_scaling, noise_stream, run_opts.noise_realizations)
 379        elif run_opts.traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
 380            noise = gen_noise(res_shape[0], data_shape[1], run_opts.noisetype,
 381                              noise_scaling, noise_stream, run_opts.noise_realizations)
 382        elif run_opts.traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
 383            noise = gen_noise(res_shape[0], data_shape[1], run_opts.noisetype,
 384                              noise_scaling, noise_stream, run_opts.noise_realizations)
 385        else:
 386            raise ValueError
 387    elif run_opts.noisetype not in ['gaussian_onestep', 'perturbation_onestep'] and \
 388            ('gaussian' in run_opts.noisetype and 'step' in run_opts.noisetype):
 389        if run_opts.traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq']:
 390            noise = gen_noise(data_shape[0], data_shape[1], str(
 391                run_opts.noisetype), noise_scaling, noise_stream, run_opts.noise_realizations)
 392        elif run_opts.traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
 393            noise = gen_noise(res_shape[0], data_shape[1], run_opts.noisetype,
 394                              noise_scaling, noise_stream, run_opts.noise_realizations)
 395        elif run_opts.traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
 396            noise = gen_noise(res_shape[0], data_shape[1], run_opts.noisetype,
 397                              noise_scaling, noise_stream, run_opts.noise_realizations)
 398        else:
 399            raise ValueError
 400    elif run_opts.noisetype in ['gaussian_onestep', 'perturbation_onestep']:
 401        if run_opts.traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq']:
 402            noise = gen_noise(data_shape[0], data_shape[1], run_opts.noisetype,
 403                              noise_scaling, noise_stream, run_opts.noise_realizations)
 404        elif run_opts.traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
 405            noise = gen_noise(res_shape[0], data_shape[1], run_opts.noisetype,
 406                              noise_scaling, noise_stream, run_opts.noise_realizations)
 407        elif run_opts.traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
 408            noise = gen_noise(res_shape[0], data_shape[1], run_opts.noisetype,
 409                              noise_scaling, noise_stream, run_opts.noise_realizations)
 410        else:
 411            raise ValueError
 412    elif 'gaussian' in run_opts.noisetype:
 413        noise = gen_noise(data_shape[0], data_shape[1], str(
 414            run_opts.noisetype), noise_scaling, noise_stream, run_opts.noise_realizations)
 415    elif 'gradient' in run_opts.noisetype or run_opts.noisetype == 'none':
 416        noise = np.zeros((1, data_shape[0], data_shape[1]))
 417    else:
 418        raise ValueError
 419    return noise
 420
 421
 422def gen_noise(noise_size, noise_length, noisetype, noise_scaling, noise_stream, noise_realizations):
 423    # Generates an array of noise vectors
 424    noise = np.zeros((noise_realizations, noise_size, noise_length))
 425    if 'gaussian' in noisetype:
 426        for i in range(noise_realizations):
 427            noise[i] = noise_stream[i].standard_normal(
 428                (noise_size, noise_length)) * np.sqrt(noise_scaling)
 429    if noisetype in ['perturbation', 'perturbation_onestep']:
 430        for noise_realization in noise_realizations:
 431            if noise_realization < noise_size:
 432                noise[noise_realization, noise_realization] = np.ones(
 433                    noise_length) * np.sqrt(noise_scaling)
 434            elif noise_realization < 2 * noise_length:
 435                noise[noise_realization, noise_realization -
 436                      noise_size] = -np.ones(noise_length) * np.sqrt(noise_scaling)
 437            else:
 438                raise ValueError
 439
 440    return noise
 441
 442
 443def get_states(run_opts, res, rk, noise, noise_scaling=0):
 444    # Obtains the matrices used to train the reservoir using either linear regression or a sylvester equation
 445    # (in the case of Sylvester or Sylvester_wD training types)
 446    # Calls the numba compatible wrapped function
 447    if run_opts.traintype == 'getD':
 448        Dn = getD(np.ascontiguousarray(rk.u_arr_train), res.X, res.Win_data, res.Win_indices, res.Win_indptr,
 449                  res.Win_shape, res.W_data, res.W_indices, res.W_indptr, res.W_shape, res.leakage,
 450                  run_opts.discard_time, noise, run_opts.noisetype, noise_scaling, run_opts.noise_realizations,
 451                  run_opts.traintype, run_opts.squarenodes)
 452        return Dn
 453    elif run_opts.traintype in ['sylvester', 'sylvester_wD']:
 454        res.data_trstates, res.states_trstates, res.Y_train, res.X_train, res.left_mat = get_states_wrapped(
 455            np.ascontiguousarray(rk.u_arr_train), run_opts.reg_train_times, res.X, res.Win_data, res.Win_indices,
 456            res.Win_indptr, res.Win_shape, res.W_data, res.W_indices, res.W_indptr, res.W_shape, res.leakage,
 457            run_opts.discard_time, noise, run_opts.noisetype, noise_scaling, run_opts.noise_realizations,
 458            run_opts.traintype, run_opts.squarenodes)
 459    else:
 460        res.data_trstates, res.states_trstates, res.Y_train, res.X_train, res.gradient_reg = get_states_wrapped(
 461            np.ascontiguousarray(rk.u_arr_train), run_opts.reg_train_times, res.X, res.Win_data, res.Win_indices,
 462            res.Win_indptr, res.Win_shape, res.W_data, res.W_indices, res.W_indptr, res.W_shape, res.leakage,
 463            run_opts.discard_time, noise, run_opts.noisetype, noise_scaling, run_opts.noise_realizations,
 464            run_opts.traintype, run_opts.squarenodes)
 465
 466
 467@jit(nopython=True, fastmath=True)
 468def get_squared(X, rsvr_size, squarenodes, dim=0):
 469    X_aug = np.copy(X)
 470    if not squarenodes:
 471        return X_aug
 472    else:
 473        X_out = np.vstack(
 474            (X_aug[0].reshape(1, -1), X_aug[1:rsvr_size + 1], X_aug[1:rsvr_size + 1] ** 2.0, X_aug[rsvr_size + 1:]))
 475        return X_out
 476
 477
 478@jit(nopython=True, fastmath=True)
 479def get_squared_vec(X, rsvr_size, squarenodes):
 480    X_aug = np.copy(X)
 481    if not squarenodes:
 482        return X_aug
 483    else:
 484        X_out = np.concatenate(
 485            (np.array([X_aug[0]]), X_aug[1:rsvr_size + 1], X_aug[1:rsvr_size + 1] ** 2.0, X_aug[rsvr_size + 1:]))
 486        return X_out
 487
 488
 489@jit(nopython=True, fastmath=True)
 490def get_states_wrapped(u_arr_train, reg_train_times, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data,
 491                       W_indices, W_indptr, W_shape, leakage, skip, noise, noisetype='none',
 492                       noise_scaling=0, noise_realizations=1, traintype='normal', squarenodes=False, q=0):
 493    # Numba compatible function to obtain the matrices used to train the reservoir using either linear regression or a sylvester equation.
 494    # The type of matrices depends on the traintype, number of noise realizations, and noisetype
 495    res_X = np.ascontiguousarray(res_X)
 496    u_arr_train = np.ascontiguousarray(u_arr_train)
 497    n, d = u_arr_train.shape
 498    rsvr_size, res_d = res_X.shape
 499    if squarenodes:
 500        res_feature_size = 2 * rsvr_size
 501    else:
 502        res_feature_size = rsvr_size
 503    data_trstates = np.zeros((n, res_feature_size + n + 1))
 504    states_trstates = np.zeros((reg_train_times.size, res_feature_size + n + 1, res_feature_size + n + 1))
 505    gradient_reg = np.zeros((reg_train_times.size, res_feature_size + n + 1, res_feature_size + n + 1))
 506    Y_train = np.ascontiguousarray(u_arr_train[:, skip:-1])
 507    print('Reg train times:')
 508    print(reg_train_times)
 509    reg_train_fracs = 1.0 / reg_train_times
 510    print('Reg train fracs:')
 511    print(reg_train_fracs)
 512    if traintype in ['normal', 'normalres1', 'normalres2']:
 513        # Normal multi-noise training that sums all reservoir state outer products
 514        for i in range(noise_realizations):
 515            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 516                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 517                                                 noise_scaling, i, traintype)
 518            X = X[:, skip:(res_d - 2)]
 519            X_train = np.ascontiguousarray(np.concatenate(
 520                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 521            X_train = get_squared(X_train, rsvr_size, squarenodes)
 522            data_trstates += Y_train @ X_train.T
 523            states_trstates[0] += X_train @ X_train.T
 524        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 525    elif traintype in ['rmeanq', 'rmeanqres1', 'rmeanqres2']:
 526        # Training using the mean and the rescaled sum of the perturbations from the mean
 527        for i in range(noise_realizations):
 528            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 529                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 530                                                 noise_scaling, i, traintype)
 531            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 532            X_train = np.ascontiguousarray(np.concatenate(
 533                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 534            X_train = get_squared(X_train, rsvr_size, squarenodes)
 535            if i == 0:
 536                X_train_mean = np.zeros(X_train.shape)
 537                X_all = np.zeros((X_train.shape[0], X_train.shape[1], noise_realizations))
 538            X_train_mean += X_train / noise_realizations
 539            X_all[:, :, i] = X_train
 540        data_trstates = Y_train @ X_train_mean.T
 541        states_trstates[0] = X_train_mean @ X_train_mean.T
 542        for i in range(1, reg_train_times.size):
 543            states_trstates[i] = np.copy(states_trstates[0])
 544        for (j, reg_train_time), prev_reg_train_time in zip(enumerate(reg_train_times),
 545                                                            np.append(np.zeros(1, dtype=np.int64),
 546                                                                      reg_train_times[:-1])):
 547            for i in range(noise_realizations):
 548                Q_fit = X_all[:, prev_reg_train_time:reg_train_time, i] - \
 549                        X_train_mean[:, prev_reg_train_time:reg_train_time]
 550                Q_fit_2 = Q_fit @ Q_fit.T
 551                for k in range(j, reg_train_times.size):
 552                    states_trstates[k] += Q_fit_2 * reg_train_fracs[k] / noise_realizations
 553        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 554    elif traintype in ['rmean', 'rmeanres1', 'rmeanres2']:
 555        # Training using the mean only
 556        for i in range(noise_realizations):
 557            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 558                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 559                                                 noise_scaling, i, traintype)
 560            X = X[:, skip:(res_d - 2)]
 561            X_train = np.ascontiguousarray(np.concatenate(
 562                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 563            X_train = get_squared(X_train, rsvr_size, squarenodes)
 564            if i == 0:
 565                X_train_mean = np.zeros(X_train.shape)
 566            X_train_mean += X_train / noise_realizations
 567        data_trstates = Y_train @ X_train_mean.T
 568        states_trstates[0] = X_train_mean @ X_train_mean.T
 569        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 570    elif traintype in ['rqmean', 'rqmeanres1', 'rqmeanres2']:
 571        # Training using the noiseless reservoir and the rescaled perturbations from the mean
 572        X_0, u_arr_train_noise = get_X_wrapped(
 573            u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
 574            leakage, noise[0])
 575        X_0 = np.ascontiguousarray(X_0[:, skip:(res_d - 2)])
 576        X_train_0 = np.ascontiguousarray(np.concatenate(
 577            (np.ones((1, d - (skip + 1))), X_0, u_arr_train[:, skip - 1:-2]), axis=0))
 578        X_train_0 = get_squared(X_train_0, rsvr_size, squarenodes)
 579        X_all = np.zeros(
 580            (X_train_0.shape[0], X_train_0.shape[1], noise_realizations))
 581        for i in range(noise_realizations):
 582            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 583                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 584                                                 noise_scaling, i, traintype)
 585            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 586            X_train = np.ascontiguousarray(np.concatenate(
 587                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 588            X_train = get_squared(X_train, rsvr_size, squarenodes)
 589            if i == 0:
 590                X_train_mean = np.zeros(X_train.shape)
 591            X_train_mean += X_train / noise_realizations
 592            X_all[:, :, i] = X_train
 593        data_trstates = Y_train @ X_train_0.T
 594        states_trstates[0] = X_train_0 @ X_train_0.T
 595        for i in range(1, reg_train_times.size):
 596            states_trstates[i] = np.copy(states_trstates[0])
 597        prev_reg_train_time = 0
 598        for j, reg_train_time in enumerate(reg_train_times):
 599            for i in range(noise_realizations):
 600                Q_fit = X_all[:, prev_reg_train_time:reg_train_time, i] - \
 601                        X_train_mean[:, prev_reg_train_time:reg_train_time]
 602                for k in range(j, reg_train_times.size):
 603                    states_trstates[k] += (Q_fit @ Q_fit.T) * reg_train_fracs[k] / noise_realizations
 604            prev_reg_train_time = reg_train_time
 605        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 606    elif traintype in ['rq', 'rqres1', 'rqres2']:
 607        # Training using the noiseless reservoir and the rescaled perturbations from the noiseless reservoir
 608        X_0, u_arr_train_noise = get_X_wrapped(
 609            u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
 610            leakage, noise[0])
 611        X_0 = np.ascontiguousarray(X_0[:, skip:(res_d - 2)])
 612        X_train_0 = np.ascontiguousarray(np.concatenate(
 613            (np.ones((1, d - (skip + 1))), X_0, u_arr_train[:, skip - 1:-2]), axis=0))
 614        X_train_0 = get_squared(X_train_0, rsvr_size, squarenodes)
 615        X_all = np.zeros(
 616            (X_train_0.shape[0], X_train_0.shape[1], noise_realizations))
 617        for i in range(noise_realizations):
 618            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 619                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 620                                                 noise_scaling, i, traintype)
 621            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 622            X_train = np.ascontiguousarray(np.concatenate(
 623                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 624            X_train = get_squared(X_train, rsvr_size, squarenodes)
 625            X_all[:, :, i] = X_train
 626        data_trstates = Y_train @ X_train_0.T
 627        states_trstates[0] = X_train_0 @ X_train_0.T
 628        for i in range(1, reg_train_times.size):
 629            states_trstates[i] = np.copy(states_trstates[0])
 630        prev_reg_train_time = 0
 631        for j, reg_train_time in enumerate(reg_train_times):
 632            for i in range(noise_realizations):
 633                Q_fit = X_all[:, prev_reg_train_time:reg_train_time, i] - \
 634                        X_train_0[:, prev_reg_train_time:reg_train_time]
 635                for k in range(j, reg_train_times.size):
 636                    states_trstates[k] += (Q_fit @ Q_fit.T) * reg_train_fracs[k] / noise_realizations
 637            prev_reg_train_time = reg_train_time
 638        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 639    elif traintype in ['rplusq', 'rplusqres1', 'rplusqres2']:
 640        # Training using the sum of the outer products of the sum of the noiseless reservoir and
 641        # the perturbation from the mean
 642        X_0, u_arr_train_noise = get_X_wrapped(
 643            u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
 644            leakage, noise[0])
 645        X_0 = np.ascontiguousarray(X_0[:, skip:(res_d - 2)])
 646        X_train_0 = np.ascontiguousarray(np.concatenate(
 647            (np.ones((1, d - (skip + 1))), X_0, u_arr_train[:, skip - 1:-2]), axis=0))
 648        X_train_0 = get_squared(X_train_0, rsvr_size, squarenodes)
 649        X_all = np.zeros(
 650            (X_train_0.shape[0], X_train_0.shape[1], noise_realizations))
 651        for i in range(noise_realizations):
 652            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 653                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 654                                                 noise_scaling, i, traintype)
 655            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 656            X_train = np.ascontiguousarray(np.concatenate(
 657                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 658            X_train = get_squared(X_train, rsvr_size, squarenodes)
 659            if i == 0:
 660                X_train_mean = np.zeros(X_train.shape)
 661            X_train_mean += X_train / noise_realizations
 662            X_all[:, :, i] = X_train
 663        Y_fit = Y_train
 664        X_fit = X_all[:, :, 0] - X_train_mean + X_train_0
 665        for i in range(1, noise_realizations):
 666            Y_fit = np.append(Y_fit, Y_train, axis=1)
 667            X_fit = np.append(
 668                X_fit, X_all[:, :, i] - X_train_mean + X_train_0, axis=1)
 669        data_trstates = Y_fit @ X_fit.T
 670        states_trstates[0] = X_fit @ X_fit.T
 671        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 672    elif 'gradientk' in traintype and 'only' not in traintype:
 673        # Linearized k-step noise
 674        k = str_to_int(traintype.replace('gradientk', ''))
 675        reg_train_fracs = 1.0 / (reg_train_times - (k - 1))
 676        sparse_cutoff = 0.89
 677        break_flag = False
 678        X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
 679                             W_indptr,
 680                             W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 681        X = X[:, skip:(res_d - 2)]
 682        D = D[:, skip:(res_d - 2)]
 683        X_train = np.concatenate(
 684            (np.ones((1, d - (skip + 1))), X, u_arr_train[:, skip - 1:-2]), axis=0)
 685        X_train = get_squared(X_train, rsvr_size, squarenodes)
 686        gradient_reg_base = np.zeros((res_feature_size + n + 1, res_feature_size + n + 1))
 687        Win_nobias_data, Win_nobias_indices, Win_nobias_indptr, Win_nobias_shape = \
 688            get_Win_nobias(Win_data, Win_indices, Win_indptr, Win_shape)
 689        D_n_datas = List()
 690        D_n_indices = List()
 691        D_n_indptrs = List()
 692        D_n_shape = np.array([res_feature_size + n + 1, n])
 693        E_n_datas = List()
 694        E_n_indices = List()
 695        E_n_indptrs = List()
 696        E_n_shape = np.array([res_feature_size + n + 1, res_feature_size + n + 1])
 697        reg_comp_datas = List()
 698        reg_comp_indices = List()
 699        reg_comp_indptrs = List()
 700        reg_comp_shape = np.array([res_feature_size + n + 1, n])
 701        W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape = construct_jac_mat_csc_csc(
 702            Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, rsvr_size, n,
 703            squarenodes)
 704        leakage_data, leakage_indices, leakage_indptr, leakage_shape = construct_leakage_mat(rsvr_size, n, leakage,
 705                                                                                             squarenodes)
 706        reg_sum_avg_runtime = 0.
 707        E_n_avg_runtime = 0.
 708        reg_mult_avg_runtime = 0.
 709        D_n_avg_runtime = 0.
 710
 711        for i in range(k):
 712            D_n_data, D_n_idx, D_n_indptr = get_D_n(D[:, i], X[:, i], Win_nobias_data, Win_nobias_indices,
 713                                                    Win_nobias_indptr, Win_nobias_shape, D_n_shape, rsvr_size,
 714                                                    res_feature_size, n, squarenodes)
 715            D_n_datas.append(np.ascontiguousarray(D_n_data))
 716            D_n_indices.append(np.ascontiguousarray(D_n_idx))
 717            D_n_indptrs.append(np.ascontiguousarray(D_n_indptr))
 718        if k > 1:
 719            for i in range(1, k):
 720                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i], E_n_shape, rsvr_size, W_mat_data,
 721                                                        W_mat_indices, W_mat_indptr, W_mat_shape, leakage_data,
 722                                                        leakage_indices,
 723                                                        leakage_indptr, leakage_shape, squarenodes)
 724                E_n_datas.append(np.ascontiguousarray(E_n_data))
 725                E_n_indices.append(np.ascontiguousarray(E_n_idx))
 726                E_n_indptrs.append(np.ascontiguousarray(E_n_indptr))
 727
 728        for i in range(k - 1):
 729            reg_comp_data, reg_comp_idx, reg_comp_indptr = \
 730                np.copy(D_n_datas[i]), np.copy(D_n_indices[i]), np.copy(D_n_indptrs[i])
 731            if k > 1:
 732                for j in range(i, k - 1):
 733                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 734                        E_n_datas[j], E_n_indices[j], E_n_indptrs[j], E_n_shape,
 735                        reg_comp_data, reg_comp_idx, reg_comp_indptr, reg_comp_shape)
 736            reg_comp_datas.append(np.ascontiguousarray(reg_comp_data))
 737            reg_comp_indices.append(np.ascontiguousarray(reg_comp_idx))
 738            reg_comp_indptrs.append(np.ascontiguousarray(reg_comp_indptr))
 739        reg_comp_datas.append(np.ascontiguousarray(D_n_datas[-1]))
 740        reg_comp_indices.append(np.ascontiguousarray(D_n_indices[-1]))
 741        reg_comp_indptrs.append(np.ascontiguousarray(D_n_indptrs[-1]))
 742        sparsity = np.array([reg_comp_datas[j].size / (reg_comp_shape[0] * reg_comp_shape[1]) for j in range(k)])
 743
 744        for i in range(k, X.shape[1]):
 745            for j in range(k):
 746                if sparsity[j] < sparse_cutoff:
 747                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 748                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
 749                else:
 750                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
 751                                                                    reg_comp_indptrs[j], reg_comp_shape)
 752            assign_grad_reg = (i == reg_train_times)
 753            if np.any(assign_grad_reg):
 754                print(assign_grad_reg)
 755                print(gradient_reg[assign_grad_reg].shape)
 756                print((gradient_reg_base * reg_train_fracs[assign_grad_reg]).shape)
 757                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 758            if assign_grad_reg[-1]:
 759                break_flag = True
 760                break
 761            if k > 1:
 762                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i],
 763                                                        E_n_shape, rsvr_size, W_mat_data, W_mat_indices, W_mat_indptr,
 764                                                        W_mat_shape,
 765                                                        leakage_data, leakage_indices, leakage_indptr, leakage_shape,
 766                                                        squarenodes)
 767                E_n_datas[k - 2] = np.ascontiguousarray(E_n_data)
 768                E_n_indices[k - 2] = np.ascontiguousarray(E_n_idx)
 769                E_n_indptrs[k - 2] = np.ascontiguousarray(E_n_indptr)
 770            for j in range(k - 1):
 771                if k > 1:
 772                    if sparsity[j + 1] < sparse_cutoff:
 773                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_conv_mult(
 774                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 775                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 776                    else:
 777                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 778                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 779                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 780                    reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j] = \
 781                        np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 782                        np.ascontiguousarray(reg_comp_indptr)
 783            reg_comp_data, reg_comp_idx, reg_comp_indptr = get_D_n(D[:, i], X[:, i],
 784                                                                   Win_nobias_data, Win_nobias_indices,
 785                                                                   Win_nobias_indptr, Win_nobias_shape, D_n_shape,
 786                                                                   rsvr_size, res_feature_size, n, squarenodes)
 787            reg_comp_datas[k - 1], reg_comp_indices[k - 1], reg_comp_indptrs[k - 1] = \
 788                np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 789                np.ascontiguousarray(reg_comp_indptr)
 790        if not break_flag:
 791            for j in range(k):
 792                if sparsity[j] < sparse_cutoff:
 793                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 794                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
 795                else:
 796                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
 797                                                                    reg_comp_indptrs[j], reg_comp_shape)
 798            assign_grad_reg = i + 1 == reg_train_times
 799            if np.any(assign_grad_reg):
 800                print(assign_grad_reg)
 801                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 802        data_trstates = Y_train @ X_train.T
 803        states_trstates[0] = X_train @ X_train.T
 804        for i in range(1, reg_train_times.size):
 805            states_trstates[i] = np.ascontiguousarray(states_trstates[0])
 806        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 807    elif 'gradientk' in traintype and 'only' in traintype:
 808        # Linearized k-step noise
 809        k = str_to_int(traintype.replace('gradientk', '').replace('only', ''))
 810        reg_train_fracs = 1.0 / (reg_train_times - (k - 1))
 811        sparse_cutoff = 0.89
 812        break_flag = False
 813        X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
 814                             W_indptr,
 815                             W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 816        X = X[:, skip:(res_d - 2)]
 817        D = D[:, skip:(res_d - 2)]
 818        X_train = np.concatenate(
 819            (np.ones((1, d - (skip + 1))), X, u_arr_train[:, skip - 1:-2]), axis=0)
 820        X_train = get_squared(X_train, rsvr_size, squarenodes)
 821        gradient_reg_base = np.zeros((res_feature_size + n + 1, res_feature_size + n + 1))
 822        Win_nobias_data, Win_nobias_indices, Win_nobias_indptr, Win_nobias_shape = \
 823            get_Win_nobias(Win_data, Win_indices, Win_indptr, Win_shape)
 824        D_n_datas = List()
 825        D_n_indices = List()
 826        D_n_indptrs = List()
 827        D_n_shape = np.array([res_feature_size + n + 1, n])
 828        E_n_datas = List()
 829        E_n_indices = List()
 830        E_n_indptrs = List()
 831        E_n_shape = np.array([res_feature_size + n + 1, res_feature_size + n + 1])
 832        reg_comp_datas = List()
 833        reg_comp_indices = List()
 834        reg_comp_indptrs = List()
 835        reg_comp_shape = np.array([res_feature_size + n + 1, n])
 836        W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape = construct_jac_mat_csc_csc(
 837            Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, rsvr_size, n,
 838            squarenodes)
 839        leakage_data, leakage_indices, leakage_indptr, leakage_shape = construct_leakage_mat(rsvr_size, n, leakage,
 840                                                                                             squarenodes)
 841        reg_sum_avg_runtime = 0.
 842        E_n_avg_runtime = 0.
 843        reg_mult_avg_runtime = 0.
 844        D_n_avg_runtime = 0.
 845
 846        for i in range(k):
 847            D_n_data, D_n_idx, D_n_indptr = get_D_n(D[:, i], X[:, i], Win_nobias_data, Win_nobias_indices,
 848                                                    Win_nobias_indptr, Win_nobias_shape, D_n_shape, rsvr_size,
 849                                                    res_feature_size, n, squarenodes)
 850            D_n_datas.append(np.ascontiguousarray(D_n_data))
 851            D_n_indices.append(np.ascontiguousarray(D_n_idx))
 852            D_n_indptrs.append(np.ascontiguousarray(D_n_indptr))
 853        if k > 1:
 854            for i in range(1, k):
 855                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i], E_n_shape, rsvr_size, W_mat_data,
 856                                                        W_mat_indices, W_mat_indptr, W_mat_shape, leakage_data,
 857                                                        leakage_indices,
 858                                                        leakage_indptr, leakage_shape, squarenodes)
 859                E_n_datas.append(np.ascontiguousarray(E_n_data))
 860                E_n_indices.append(np.ascontiguousarray(E_n_idx))
 861                E_n_indptrs.append(np.ascontiguousarray(E_n_indptr))
 862
 863        for i in range(k - 1):
 864            reg_comp_data, reg_comp_idx, reg_comp_indptr = \
 865                np.copy(D_n_datas[i]), np.copy(D_n_indices[i]), np.copy(D_n_indptrs[i])
 866            if k > 1:
 867                for j in range(i, k - 1):
 868                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 869                        E_n_datas[j], E_n_indices[j], E_n_indptrs[j], E_n_shape,
 870                        reg_comp_data, reg_comp_idx, reg_comp_indptr, reg_comp_shape)
 871            reg_comp_datas.append(np.ascontiguousarray(reg_comp_data))
 872            reg_comp_indices.append(np.ascontiguousarray(reg_comp_idx))
 873            reg_comp_indptrs.append(np.ascontiguousarray(reg_comp_indptr))
 874        reg_comp_datas.append(np.ascontiguousarray(D_n_datas[-1]))
 875        reg_comp_indices.append(np.ascontiguousarray(D_n_indices[-1]))
 876        reg_comp_indptrs.append(np.ascontiguousarray(D_n_indptrs[-1]))
 877        sparsity = np.array([reg_comp_datas[j].size / (reg_comp_shape[0] * reg_comp_shape[1]) for j in range(k)])
 878
 879        for i in range(k, X.shape[1]):
 880            if sparsity[j] < sparse_cutoff:
 881                gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 882                    reg_comp_datas[0], reg_comp_indices[0], reg_comp_indptrs[0], reg_comp_shape)
 883            else:
 884                gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[0], reg_comp_indices[0],
 885                                                                reg_comp_indptrs[0], reg_comp_shape)
 886            assign_grad_reg = (i == reg_train_times)
 887            if np.any(assign_grad_reg):
 888                print(assign_grad_reg)
 889                print(gradient_reg[assign_grad_reg].shape)
 890                print((gradient_reg_base * reg_train_fracs[assign_grad_reg]).shape)
 891                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 892            if assign_grad_reg[-1]:
 893                break_flag = True
 894                break
 895            if k > 1:
 896                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i],
 897                                                        E_n_shape, rsvr_size, W_mat_data, W_mat_indices, W_mat_indptr,
 898                                                        W_mat_shape,
 899                                                        leakage_data, leakage_indices, leakage_indptr, leakage_shape,
 900                                                        squarenodes)
 901                E_n_datas[k - 2] = np.ascontiguousarray(E_n_data)
 902                E_n_indices[k - 2] = np.ascontiguousarray(E_n_idx)
 903                E_n_indptrs[k - 2] = np.ascontiguousarray(E_n_indptr)
 904            for j in range(k - 1):
 905                if k > 1:
 906                    if sparsity[j + 1] < sparse_cutoff:
 907                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_conv_mult(
 908                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 909                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 910                    else:
 911                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 912                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 913                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 914                    reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j] = \
 915                        np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 916                        np.ascontiguousarray(reg_comp_indptr)
 917            reg_comp_data, reg_comp_idx, reg_comp_indptr = get_D_n(D[:, i], X[:, i],
 918                                                                   Win_nobias_data, Win_nobias_indices,
 919                                                                   Win_nobias_indptr, Win_nobias_shape, D_n_shape,
 920                                                                   rsvr_size, res_feature_size, n, squarenodes)
 921            reg_comp_datas[k - 1], reg_comp_indices[k - 1], reg_comp_indptrs[k - 1] = \
 922                np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 923                np.ascontiguousarray(reg_comp_indptr)
 924        if not break_flag:
 925            if sparsity[j] < sparse_cutoff:
 926                gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 927                    reg_comp_datas[0], reg_comp_indices[0], reg_comp_indptrs[0], reg_comp_shape)
 928            else:
 929                gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[0], reg_comp_indices[0],
 930                                                                reg_comp_indptrs[0], reg_comp_shape)
 931            assign_grad_reg = i + 1 == reg_train_times
 932            if np.any(assign_grad_reg):
 933                print(assign_grad_reg)
 934                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 935        data_trstates = Y_train @ X_train.T
 936        states_trstates[0] = X_train @ X_train.T
 937        for i in range(1, reg_train_times.size):
 938            states_trstates[i] = np.ascontiguousarray(states_trstates[0])
 939        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 940        """
 941    elif traintype == 'sylvester':
 942        # Sylvester regularization w/o derivative
 943        X, p = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
 944                           W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 945        X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 946        X_train = np.ascontiguousarray(np.concatenate(
 947            (np.ones((1, d-(skip+1))), X, u_arr_train[:, skip-1:-2]), axis=0))
 948        data_trstates = Y_train @ X_train.T
 949        data_trstates[:, 1:rsvr_size+1] += noise_scaling**2/noise_realizations * \
 950            matrix_dot_left_T(W_mat_data, W_mat_indices,
 951                              W_mat_indptr, W_mat_shape, Win[:, 1:])
 952        states_trstates[0] = X_train @ X_train.T
 953        left_mat_base = -noise_scaling**2/noise_realizations * \
 954            (Win[:, 1:].T @ Win[:, 1:])
 955        left_mat = left_mat_base.reshape(1, left_mat_base.shape[0], left_mat_base.shape[1])
 956        return data_trstates, states_trstates, Y_train, X_train, left_mat
 957        
 958    elif traintype == 'sylvester_wD':
 959        # Sylvester regularization with derivative
 960        X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
 961                           W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 962        X = X[:, skip:(res_d - 2)]
 963        D = D[:, skip:(res_d - 2)]
 964        X_train = np.concatenate(
 965            (np.ones((1, d-(skip+1))), X, u_arr_train[:, skip-1:-2]), axis=0)
 966        Dmean = mean_numba_axis1(D)
 967        temp_mat = np.diag(Dmean) @ Win[:, 1:]
 968        target_correction = matrix_dot_left_T(
 969            W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape, temp_mat)
 970        left_mat_base = temp_mat.T @ temp_mat
 971        target_correction = noise_scaling**2/noise_realizations * target_correction
 972        left_mat_base = -noise_scaling**2/noise_realizations * left_mat_base
 973        left_mat = left_mat_base.reshape(1, left_mat_base.shape[0], left_mat_base.shape[1])
 974        data_trstates = Y_train @ X_train.T
 975        data_trstates[:, 1:rsvr_size+1] += target_correction
 976        states_trstates[0] = X_train @ X_train.T
 977        return data_trstates, states_trstates, Y_train, X_train, left_mat
 978        """
 979    elif 'regzero' in traintype:
 980        # Linearized k-step noise
 981        k = str_to_int(traintype.replace('regzerok', ''))
 982        reg_train_fracs = 1.0 / (reg_train_times - (k - 1))
 983        sparse_cutoff = 0.89
 984        break_flag = False
 985        X, tmp = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
 986                               W_indptr,
 987                               W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 988        X = X[:, skip:(res_d - 2)]
 989        X_train = np.concatenate(
 990            (np.ones((1, d - (skip + 1))), X, u_arr_train[:, skip - 1:-2]), axis=0)
 991        X_train = get_squared(X_train, rsvr_size, squarenodes)
 992        data_trstates = Y_train @ X_train.T
 993        states_trstates[0] = X_train @ X_train.T
 994        for i in range(1, reg_train_times.size):
 995            states_trstates[i] = np.ascontiguousarray(states_trstates[0])
 996        X_zero, D_zero = get_X_wrapped(np.zeros(u_arr_train.shape), res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 997                                       W_data, W_indices, W_indptr,
 998                                       W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 999        X = X_zero[:, skip:(res_d - 2)]
1000        D = D_zero[:, skip:(res_d - 2)]
1001        gradient_reg_base = np.zeros((res_feature_size + n + 1, res_feature_size + n + 1))
1002        Win_nobias_data, Win_nobias_indices, Win_nobias_indptr, Win_nobias_shape = \
1003            get_Win_nobias(Win_data, Win_indices, Win_indptr, Win_shape)
1004        D_n_datas = List()
1005        D_n_indices = List()
1006        D_n_indptrs = List()
1007        D_n_shape = np.array([res_feature_size + n + 1, n])
1008        E_n_datas = List()
1009        E_n_indices = List()
1010        E_n_indptrs = List()
1011        E_n_shape = np.array([res_feature_size + n + 1, res_feature_size + n + 1])
1012        reg_comp_datas = List()
1013        reg_comp_indices = List()
1014        reg_comp_indptrs = List()
1015        reg_comp_shape = np.array([res_feature_size + n + 1, n])
1016        W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape = construct_jac_mat_csc_csc(
1017            Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, rsvr_size, n,
1018            squarenodes)
1019        leakage_data, leakage_indices, leakage_indptr, leakage_shape = construct_leakage_mat(rsvr_size, n, leakage,
1020                                                                                             squarenodes)
1021        reg_sum_avg_runtime = 0.
1022        E_n_avg_runtime = 0.
1023        reg_mult_avg_runtime = 0.
1024        D_n_avg_runtime = 0.
1025
1026        for i in range(k):
1027            D_n_data, D_n_idx, D_n_indptr = get_D_n(D[:, i], X[:, i], Win_nobias_data, Win_nobias_indices,
1028                                                    Win_nobias_indptr, Win_nobias_shape, D_n_shape, rsvr_size,
1029                                                    res_feature_size, n, squarenodes)
1030            D_n_datas.append(np.ascontiguousarray(D_n_data))
1031            D_n_indices.append(np.ascontiguousarray(D_n_idx))
1032            D_n_indptrs.append(np.ascontiguousarray(D_n_indptr))
1033        for i in range(1, k):
1034            E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i], E_n_shape, rsvr_size, W_mat_data,
1035                                                    W_mat_indices, W_mat_indptr, W_mat_shape, leakage_data,
1036                                                    leakage_indices,
1037                                                    leakage_indptr, leakage_shape, squarenodes)
1038            E_n_datas.append(np.ascontiguousarray(E_n_data))
1039            E_n_indices.append(np.ascontiguousarray(E_n_idx))
1040            E_n_indptrs.append(np.ascontiguousarray(E_n_indptr))
1041
1042        for i in range(k - 1):
1043            reg_comp_data, reg_comp_idx, reg_comp_indptr = \
1044                np.copy(D_n_datas[i]), np.copy(D_n_indices[i]), np.copy(D_n_indptrs[i])
1045            for j in range(i, k - 1):
1046                reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
1047                    E_n_datas[j], E_n_indices[j], E_n_indptrs[j], E_n_shape,
1048                    reg_comp_data, reg_comp_idx, reg_comp_indptr, reg_comp_shape)
1049            reg_comp_datas.append(np.ascontiguousarray(reg_comp_data))
1050            reg_comp_indices.append(np.ascontiguousarray(reg_comp_idx))
1051            reg_comp_indptrs.append(np.ascontiguousarray(reg_comp_indptr))
1052        reg_comp_datas.append(np.ascontiguousarray(D_n_datas[-1]))
1053        reg_comp_indices.append(np.ascontiguousarray(D_n_indices[-1]))
1054        reg_comp_indptrs.append(np.ascontiguousarray(D_n_indptrs[-1]))
1055        sparsity = np.array([reg_comp_datas[j].size / (reg_comp_shape[0] * reg_comp_shape[1]) for j in range(k)])
1056
1057        for i in range(k, X.shape[1]):
1058            for j in range(k):
1059                if sparsity[j] < sparse_cutoff:
1060                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
1061                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
1062                else:
1063                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
1064                                                                    reg_comp_indptrs[j], reg_comp_shape)
1065            assign_grad_reg = (i == reg_train_times)
1066            if np.any(assign_grad_reg):
1067                print(assign_grad_reg)
1068                print(gradient_reg[assign_grad_reg].shape)
1069                print((gradient_reg_base * reg_train_fracs[assign_grad_reg]).shape)
1070                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
1071            if assign_grad_reg[-1]:
1072                break_flag = True
1073                break
1074            E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i],
1075                                                    E_n_shape, rsvr_size, W_mat_data, W_mat_indices, W_mat_indptr,
1076                                                    W_mat_shape,
1077                                                    leakage_data, leakage_indices, leakage_indptr, leakage_shape,
1078                                                    squarenodes)
1079            E_n_datas[k - 2] = np.ascontiguousarray(E_n_data)
1080            E_n_indices[k - 2] = np.ascontiguousarray(E_n_idx)
1081            E_n_indptrs[k - 2] = np.ascontiguousarray(E_n_indptr)
1082
1083            for j in range(k - 1):
1084                if sparsity[j + 1] < sparse_cutoff:
1085                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_conv_mult(
1086                        E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
1087                        reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
1088                else:
1089                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
1090                        E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
1091                        reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
1092                reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j] = \
1093                    np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
1094                    np.ascontiguousarray(reg_comp_indptr)
1095            reg_comp_data, reg_comp_idx, reg_comp_indptr = get_D_n(D[:, i], X[:, i],
1096                                                                   Win_nobias_data, Win_nobias_indices,
1097                                                                   Win_nobias_indptr, Win_nobias_shape, D_n_shape,
1098                                                                   rsvr_size, res_feature_size, n, squarenodes)
1099            reg_comp_datas[k - 1], reg_comp_indices[k - 1], reg_comp_indptrs[k - 1] = \
1100                np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
1101                np.ascontiguousarray(reg_comp_indptr)
1102        if not break_flag:
1103            for j in range(k):
1104                if sparsity[j] < sparse_cutoff:
1105                    # gradient_reg += reg_components[:, :, j] @ reg_components[:, :, j].T
1106                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
1107                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
1108                else:
1109                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
1110                                                                    reg_comp_indptrs[j], reg_comp_shape)
1111            assign_grad_reg = i + 1 == reg_train_times
1112            if np.any(assign_grad_reg):
1113                print(assign_grad_reg)
1114                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
1115        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
1116    else:
1117        # Noiseless training
1118        data_trstates = np.zeros((n, res_feature_size + 1 + n), dtype=np.float64)
1119        states_trstates[0] = np.zeros(
1120            (n + res_feature_size + 1, n + res_feature_size + 1), dtype=np.float64)
1121        X_train = np.zeros((n + res_feature_size + 1, d - (skip + 1)))
1122        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
1123
1124
@jit(nopython=True, fastmath=True)
def getD(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
         leakage, noise, skip, noisetype='none',
         noise_scaling=0, noise_realizations=1, traintype='normal', squarenodes=False):
    # Numba-compiled helper returning the reservoir state derivative matrix D
    # obtained by driving the reservoir in 'gradient' mode over the training data.
    # The trailing keyword arguments (noisetype, noise_scaling, noise_realizations,
    # traintype) are kept for call-site compatibility but are not used here.
    # Returns a one-element list holding D with the first `skip` columns and the
    # last two columns dropped (matching the training-window slicing elsewhere).
    res_d = res_X.shape[1]
    # Only the derivative matrix D is needed; the reservoir states X are discarded.
    X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
                         W_indptr, W_shape, leakage, noise, 'none', 0, 0, 'gradient', squarenodes)
    return [D[:, skip:(res_d - 2)]]
1135
1136
1137# CONCATENATE ALL THE DATA BEFORE RUNNING REGRESSION
1138
1139
def predict(res, u0, steps=1000, squarenodes=False):
    """Run the trained reservoir `res` autonomously for `steps` steps from
    initial input u0 and return the predicted time series (one column per step,
    including the initial condition). Thin wrapper over the numba-compiled
    predictwrapped."""
    return predictwrapped(res.X, res.Win_data, res.Win_indices, res.Win_indptr, res.Win_shape,
                          res.W_data, res.W_indices, res.W_indptr, res.W_shape,
                          res.Wout, res.leakage, u0, steps, squarenodes)
1145
1146
@jit(nopython=True, fastmath=True)
def predictwrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, Wout,
                   leakage, u0, steps, squarenodes=False):
    # Numba-compatible closed-loop prediction: feed the reservoir its own output.
    # Returns an array of shape (input_dim, steps + 1) whose first column is u0.
    n_in = Win_shape[1] - 1
    rsvr_size = res_X.shape[0]
    pred = np.empty((n_in, steps + 1))
    states = np.empty((rsvr_size, steps + 1))

    pred[:, 0] = u0
    # Start from the second-to-last synchronized reservoir state.
    states[:, 0] = res_X[:, -2]

    for i in range(steps):
        # Drive signal: input weights applied to [1; prediction] plus the
        # recurrent weights applied to the current state.
        in_vec = np.append(1., pred[:, i])
        drive = mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, in_vec) + \
            mult_vec(W_data, W_indices, W_indptr, W_shape, states[:, i])
        # Leaky-integrator reservoir update.
        states[:, i + 1] = (1 - leakage) * states[:, i] + leakage * np.tanh(drive)
        # Output layer acts on [1; state; previous prediction] (optionally with
        # squared node features appended by get_squared_vec).
        feature = get_squared_vec(np.concatenate((np.array([1.]), states[:, i + 1], pred[:, i])),
                                  rsvr_size, squarenodes)
        pred[:, i + 1] = Wout @ feature

    return pred
1167
1168
def get_test_data(run_opts, test_stream, overall_idx, rkTime, split):
    # Function for obtaining test data sets used to validate reservoir performance
    # Uses an array of random number generators
    """Generate run_opts.num_tests test data sets by integrating the true system
    from random initial conditions.

    Parameters
    ----------
    run_opts : RunOpts with system/tau/num_tests/save_truth options.
    test_stream : sequence of numpy random Generators, one per test.
    overall_idx : index of the overall run; truth is saved only when it is 0.
    rkTime, split : total test length and synchronization split, in time steps.

    Returns
    -------
    (rktest_u_arr_train_nonoise, rktest_u_arr_test, params) where the first two
    are 3-D arrays with the test index on the last axis, and params are the
    numerical-model parameters from the first integration (reused for the rest).
    """
    if run_opts.system == 'lorenz':
        # Lorenz initial condition: x, y in [-1, 1), z scaled by 30.
        ic = test_stream[0].random(3) * 2 - 1
        u0 = np.array([ic[0], ic[1], 30*ic[2]])
    elif run_opts.system in ['KS', 'KS_d2175']:
        # KS initial condition: 64 grid points in [-0.6, 0.6), zero spatial mean.
        u0 = (test_stream[0].random(64) * 2 - 1) * 0.6
        u0 = u0 - np.mean(u0)
    # NOTE(review): u0 is undefined if run_opts.system is not one of the above.
    # Extra steps integrated then discarded so trajectories settle on the attractor.
    transient = 2000
    u_arr_train_nonoise, u_arr_test, p, params = numerical_model_wrapped(tau=run_opts.tau, T=rkTime + transient + split,
                                                                     ttsplit=split + transient,
                                                                     u0=u0, system=run_opts.system)
    u_arr_train_nonoise = u_arr_train_nonoise[:, transient:]
    rktest_u_arr_train_nonoise = np.zeros(
        (u_arr_train_nonoise.shape[0], u_arr_train_nonoise.shape[1], run_opts.num_tests))
    rktest_u_arr_test = np.zeros(
        (u_arr_test.shape[0], u_arr_test.shape[1], run_opts.num_tests))
    rktest_u_arr_train_nonoise[:, :, 0] = u_arr_train_nonoise
    rktest_u_arr_test[:, :, 0] = u_arr_test
    """
    print('Test data %d' % 0)
    print(rktest_u_arr_test[-3:,-3:,0])
    """
    for i in range(1, run_opts.num_tests):
        # np.random.seed(i)
        if run_opts.system == 'lorenz':
            ic = test_stream[i].random(3) * 2 - 1
            u0 = np.array([ic[0],ic[1], 30 * ic[2]])
        elif run_opts.system in ['KS', 'KS_d2175']:
            u0 = (test_stream[i].random(64) * 2 - 1) * 0.6
            u0 = u0 - np.mean(u0)
        # Reuse `params` from the first integration so all tests share model parameters.
        u_arr_train_nonoise, rktest_u_arr_test[:, :, i], p, params = numerical_model_wrapped(tau=run_opts.tau,
                                                                                         T=rkTime + transient + split,
                                                                                         ttsplit=split + transient,
                                                                                         u0=u0, system=run_opts.system,
                                                                                         params=params)
        rktest_u_arr_train_nonoise[:, :, i] = u_arr_train_nonoise[:, transient:]
        """
        print('Test data %d' % i)
        print(rktest_u_arr_test[-3:,-3:,i])
        """

    # Optionally persist the true test trajectories once per overall run.
    if run_opts.save_truth and overall_idx == 0:
        for (i, test) in enumerate(np.arange(run_opts.test_start, run_opts.test_start + run_opts.num_tests)):
            np.savetxt(os.path.join(run_opts.run_folder_name,
                                    '%s_tau%0.2f_true_test_%d.csv' % (run_opts.system, run_opts.tau, test)),
                       rktest_u_arr_test[:, :, i], delimiter=',')

    return rktest_u_arr_train_nonoise, rktest_u_arr_test, params
1219
1220
def test(run_opts, res, Wout_itr, noise_in, rktest_u_arr_train_nonoise, rktest_u_arr_test, true_pmap_max, rkTime=1000,
         split=3000, params=np.array([[], []], dtype=np.complex128), showMapError=False, showTrajectories=False,
         showHist=False):
    """Evaluate the trained reservoir `res` (using output weights at index
    Wout_itr) against the test data and return the performance metrics from
    test_wrapped, with the stable count normalized to a fraction of tests.
    Thin wrapper around the numba-compiled test_wrapped."""
    results = test_wrapped(
        res.X, res.Win_data, res.Win_indices, res.Win_indptr, res.Win_shape, res.W_data, res.W_indices, res.W_indptr,
        res.W_shape, res.Wout[Wout_itr], res.leakage, rktest_u_arr_train_nonoise, rktest_u_arr_test, true_pmap_max,
        run_opts.num_tests, rkTime, split, noise_in, run_opts.system, params=params, pmap=run_opts.pmap,
        max_valid_time=run_opts.max_valid_time, squarenodes=run_opts.squarenodes, savepred=run_opts.savepred,
        save_time_rms=run_opts.save_time_rms)
    (stable_count, mean_rms, max_rms, variances, valid_time, rms, preds,
     wass_dist, pmap_max, pmap_max_wass_dist) = results

    return (stable_count / run_opts.num_tests, mean_rms, max_rms, variances, valid_time,
            rms, preds, wass_dist, pmap_max, pmap_max_wass_dist)
1234
1235
@jit(nopython=True, fastmath=True)
def test_wrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, Wout,
                 leakage, rktest_u_arr_train_nonoise, rktest_u_arr_test, true_pmap_max, num_tests, rkTime, split,
                 noise_in, system='lorenz', tau=0.1, params=np.array([[], []], dtype=np.complex128), pmap=False,
                 max_valid_time=500, squarenodes=False, savepred=False, save_time_rms=False):
    # Numba compatible function for testing trained reservoir performance against true system time series.
    # For each test series: synchronize the reservoir on noiseless data, run a closed-loop
    # prediction, and accumulate valid times, one-step map RMS errors, prediction variance,
    # and (optionally) Poincare-map Wasserstein distances.
    stable_count = 0
    # Number of non-overlapping valid-time segments in the prediction window.
    num_vt_tests = ((rkTime - split)) // max_valid_time
    valid_time = np.zeros((num_tests, num_vt_tests))
    max_rms = np.zeros(num_tests)
    mean_rms = np.zeros(num_tests)
    means = np.zeros(num_tests)
    variances = np.zeros(num_tests)
    # NOTE(review): wass_dist is allocated but never written below; it is returned as all zeros.
    wass_dist = np.zeros(num_tests)
    pmap_max = []
    pmap_max_wass_dist = np.zeros(num_tests)
    if savepred:
        preds = np.zeros((num_tests, rktest_u_arr_test.shape[0], (rkTime - split) + 1))
    else:
        # Dummy allocation keeps the return type fixed for numba compilation.
        preds = np.empty((1, 1, 1), dtype=np.double)
    if save_time_rms:
        rms = np.zeros((num_tests, (rkTime - split)))
    else:
        rms = np.empty((1, 1), dtype=np.double)

    # print(num_tests)
    for i in range(num_tests):
        # objmode escape hatch: perf_counter is not available in nopython mode.
        with objmode(test_tic='double'):
            test_tic = time.perf_counter()
        # NOTE(review): np.zeros(...) * 2 - 1 evaluates to an all -1 array; this looks
        # like a leftover from a random initialization — confirm intent.
        res_X = (np.zeros((res_X.shape[0], split + 2)) * 2 - 1)

        # sets res.X
        # Synchronize the reservoir to the noiseless true trajectory before predicting.
        res_X, p = get_X_wrapped(np.ascontiguousarray(
            rktest_u_arr_train_nonoise[:, :, i]), res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data,
            W_indices, W_indptr, W_shape, leakage, noise_in)
        # Closed-loop prediction over the entire test window.
        pred_full = predictwrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
                                   W_shape,
                                   Wout, leakage, u0=rktest_u_arr_test[:, 0, i], steps=(rkTime - split),
                                   squarenodes=squarenodes)
        if savepred:
            preds[i] = pred_full
        error = np.zeros(max_valid_time)
        if pmap:
            if system == 'lorenz':
                # Rescale predictions back to raw Lorenz coordinates — the constants are
                # presumably the normalization std/mean used on the training data; verify.
                calc_pred = np.stack((pred_full[0] * 7.929788629895004,
                                      pred_full[1] * 8.9932616136662,
                                      pred_full[2] * 8.575917849311919 + 23.596294463016896))
                # wass_dist[i] = wasserstein_distance_empirical(calc_pred.flatten(), true_trajectory.flatten())
                pred_pmap_max = poincare_max(calc_pred, np.arange(pred_full.shape[0]))
            elif system == 'KS':
                # wass_dist[i] = wasserstein_distance_empirical(pred.flatten()*1.1876770355823614, true_trajectory.flatten())
                pred_pmap_max = poincare_max(pred_full * 1.1876770355823614, np.arange(pred_full.shape[0]))
            elif system == 'KS_d2175':
                pred_pmap_max = poincare_max(pred_full * 1.2146066380280796, np.arange(pred_full.shape[0]))

            pmap_max.append(pred_pmap_max)
            # NOTE(review): only j == 0 assigns pred_pmap_max_all, so the Wasserstein
            # distance below uses the first variable's maxima only — confirm intent.
            for j in range(rktest_u_arr_test.shape[0]):
                if j == 0:
                    pred_pmap_max_all = pred_pmap_max[j]
            pmap_max_wass_dist[i] = wasserstein_distance_empirical(pred_pmap_max_all, true_pmap_max)
        else:
            pmap_max_wass_dist[i] = np.nan

        # Valid-time error threshold: 0.2 times the system's typical error saturation level.
        if system == 'KS':
            vt_cutoff = 0.2 * 1.3697994268693887
        else:
            vt_cutoff = 0.2 * np.sqrt(2)
        check_vt = True
        # Hard-coded switch between vectorized and per-step computation of the
        # one-step true-map error further below.
        array_compute = False
        pred = pred_full[:, :max_valid_time]
        for k in range(num_vt_tests):
            # Valid time = last step index before the normalized error first exceeds the cutoff.
            for j in range(1, pred.shape[1]):
                error[j] = np.sqrt(
                    np.mean((pred[:, j] - rktest_u_arr_test[:, k * max_valid_time + j, i]) ** 2.0))

                if error[j] < vt_cutoff and check_vt:
                    valid_time[i, k] = j
                else:
                    # if check_vt:
                    check_vt = False
            #print('Valid Time')
            #print(valid_time[i, k])
            # Re-synchronize on the true trajectory segment before the next valid-time test.
            res_X = np.zeros((res_X.shape[0], max_valid_time + 2))
            res_X, p = get_X_wrapped(np.ascontiguousarray(
                rktest_u_arr_test[:, k * max_valid_time:(k + 1) * max_valid_time + 1, i]), res_X, Win_data, Win_indices,
                Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, leakage, noise_in)
            if k < (num_vt_tests - 1):
                pred = predictwrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
                                      W_shape,
                                      Wout, leakage, u0=rktest_u_arr_test[:, (k + 1) * max_valid_time, i],
                                      steps=max_valid_time - 1, squarenodes=squarenodes)
                check_vt = True
        # One-step "map" error: compare each predicted step against the true model
        # advanced one step from the previous prediction.
        if array_compute:
            if system == 'lorenz':
                rkmap_u_arr_train = numerical_model_wrapped_pred(u0_array=np.stack((pred_full[0] * 7.929788629895004,
                                                                pred_full[1] * 8.9932616136662,
                                                                pred_full[2] * 8.575917849311919 + 23.596294463016896)),
                                                             h=0.01, system=system, params=params, tau=tau,
                                                             ttsplit=pred_full.shape[1])[0]
            elif system == 'KS':
                u0 = pred_full * 1.1876770355823614
                rkmap_u_arr_train = numerical_model_wrapped_pred(
                    u0_array=u0, h=tau, T=1, system=system, params=params, ttsplit=pred_full.shape[1])[0]
            elif system == 'KS_d2175':
                u0 = pred_full * 1.2146066380280796
                rkmap_u_arr_train = numerical_model_wrapped_pred(
                    u0_array=u0, h=tau, T=1, system=system, params=params, ttsplit=pred_full.shape[1])[0]
            # print(rkmap_u_arr_train[0,:10])
            x2y2z2 = sum_numba_axis0(
                (pred_full[:, 1:] - rkmap_u_arr_train[:, :-1]) ** 2.0)
        else:
            # Per-step path: integrate the true model one step from each predicted state.
            x2y2z2 = np.zeros(pred_full[0].size - 1)
            for j in range(1, pred_full[0].size):

                if system == 'lorenz':
                    rkmap_u_arr_train = \
                        numerical_model_wrapped(u0 = np.array([pred_full[0][j - 1] * 7.929788629895004,
                                            pred_full[1][j - 1] * 8.9932616136662,
                                            pred_full[2][j - 1] * 8.575917849311919 + 23.596294463016896]),
                                            h=0.01, T=1, tau=tau, system=system, params=params)[0]
                elif system == 'KS':
                    u0 = pred_full[:, j - 1] * (1.1876770355823614)
                    rkmap_u_arr_train = numerical_model_wrapped(h=tau, T=1, u0=u0, system=system, params=params)[0]
                elif system == 'KS_d2175':
                    u0 = pred_full[:, j - 1] * (1.2146066380280796)
                    rkmap_u_arr_train = numerical_model_wrapped(h=tau, T=1, u0=u0, system=system, params=params)[0]

                x2y2z2[j - 1] = np.sum((pred_full[:, j] - rkmap_u_arr_train[:, 1]) ** 2)
        rms_test = np.sqrt(x2y2z2 / pred_full.shape[0])
        if save_time_rms:
            rms[i] = rms_test
        max_rms[i] = np.max(rms_test)
        mean_rms[i] = np.mean(rms_test)
        if system == 'lorenz':
            means[i] = np.mean(pred_full[0])
            variances[i] = np.var(pred_full[0])
        elif system in ['KS', 'KS_d2175']:
            means[i] = np.mean(pred_full.flatten())
            variances[i] = np.var(pred_full.flatten())
        # Stability criterion: tiny map error and prediction variance near the
        # normalized truth's variance of 1.
        if mean_rms[i] < 5e-3 and 0.9 < variances[i] and variances[i] < 1.1:
            stable_count += 1
        with objmode(test_toc='double'):
            test_toc = time.perf_counter()
        # NOTE(review): test_time is computed but never used or returned.
        test_time = test_toc - test_tic

    return stable_count, mean_rms, max_rms, variances, valid_time, rms, preds, wass_dist, pmap_max, pmap_max_wass_dist
1382
1383
def generate_res(run_opts, res_gen, res_itr, rk, noise_stream, noise_scaling=0):
    """Build a Reservoir for iteration res_itr, generate the noise realizations
    used during training, and compute the reservoir training states.

    Returns the reservoir (with states populated by get_states) and the noise
    array produced by gen_noise_driver."""
    res = Reservoir(run_opts, res_gen, res_itr, rk.u_arr_train.shape[0])
    noise_in = gen_noise_driver(run_opts, rk.u_arr_train.shape, res.X.shape,
                                noise_scaling, noise_stream)
    # get_states mutates `res` in place with the training-state matrices.
    get_states(run_opts, res, rk, noise_in, noise_scaling)
    return res, noise_in
1392
1393
def optim_func(run_opts, res_out, res, noise_in, noise, noise_idx, rktest_u_arr_train_nonoise, rktest_u_arr_test, alpha,
               alpha_idx, true_pmap_max, rkTime=400, split=2000, params=np.array([[], []], dtype=np.complex128)):
    # Function for training and testing the performance of a reservoir trained using a particular regularization parameter
    """Solve for the output weights at regularization `alpha`, then test the
    reservoir and store all metrics into `res_out` at (noise_idx, alpha_idx, i)
    for each regularization training time i. Mutates `res` (sets res.Wout) and
    `res_out` in place; returns None."""
    # Number of largest gradient-regularization eigenvalues saved when requested.
    num_eigenvals = 500
    if run_opts.squarenodes:
        res_feature_size = res.rsvr_size * 2
    else:
        res_feature_size = res.rsvr_size
    # NOTE(review): idenmat and prior are undefined (NameError at use) if
    # run_opts.prior is neither 'zero' nor 'input_pass' — confirm upstream validation.
    if run_opts.prior == 'zero':
        idenmat = np.identity(
            res_feature_size + 1 + rktest_u_arr_train_nonoise.shape[0]) * alpha
        prior = np.zeros(res.data_trstates.shape)
    elif run_opts.prior == 'input_pass':
        idenmat = np.identity(
            res_feature_size + 1 + rktest_u_arr_train_nonoise.shape[0]) * alpha
        prior = np.concatenate((np.zeros((rktest_u_arr_train_nonoise.shape[0], 1 + res_feature_size)),
                                np.identity(rktest_u_arr_train_nonoise.shape[0])), axis=1) * alpha

    num_reg_train_times = res.gradient_reg.shape[0]
    res.Wout = np.zeros((num_reg_train_times, rktest_u_arr_train_nonoise.shape[0],
                         rktest_u_arr_train_nonoise.shape[0] + 1 + res_feature_size))
    # 1/(training length), used to average the accumulated Gram matrices.
    trainlen_mult = 1.0 / res.Y_train.shape[1]
    for i in range(num_reg_train_times):
        if run_opts.traintype not in ['sylvester', 'sylvester_wD']:
            # Ridge-style regression: Wout = (Y X^T + prior)(X X^T + noise*grad_reg + alpha I)^-1,
            # solved via the transposed linear system.
            res.Wout[i] = np.transpose(solve(np.transpose(
                res.states_trstates[i] * trainlen_mult + noise * res.gradient_reg[i] + idenmat),
                np.transpose(res.data_trstates * trainlen_mult + prior)))
        else:
            # Sylvester-equation formulation used by the 'sylvester*' train types.
            res.Wout[i] = solve_sylvester(
                res.left_mat[i], res.states_trstates[i] + idenmat, res.data_trstates)
        train_preds = res.Wout[i] @ res.X_train
        train_rms = np.sqrt(np.mean((train_preds - res.Y_train) ** 2.0, axis=0))
        # Scalars broadcast into the (noise, alpha) slices for this training time.
        res_out.train_mean_rms_out[:, :, i] = np.mean(train_rms)
        res_out.train_max_rms_out[:, :, i] = np.max(train_rms)
        if run_opts.save_eigenvals and alpha_idx == 0:
            eigenvals_out = np.linalg.eigvalsh(res.gradient_reg[i])
            # Slice with negative step takes the num_eigenvals largest eigenvalues
            # in descending order (start index clamps to the last element).
            res_out.grad_eigenvals[:, i] = eigenvals_out[eigenvals_out.size:eigenvals_out.size - num_eigenvals - 1: -1]
        res_out.stable_frac_out[noise_idx, alpha_idx, i], res_out.mean_rms_out[noise_idx, alpha_idx, i], \
        res_out.max_rms_out[noise_idx, alpha_idx, i], res_out.variances_out[noise_idx, alpha_idx, i], \
        res_out.valid_time_out[noise_idx, alpha_idx, i], res_out.rms_out[noise_idx, alpha_idx, i], \
        res_out.pred_out[noise_idx, alpha_idx, i], res_out.wass_dist_out[noise_idx, alpha_idx, i], \
        res_out.pmap_max_out[noise_idx, alpha_idx, i], res_out.pmap_max_wass_dist_out[noise_idx, alpha_idx, i] \
            = test(run_opts, res, i, noise_in, rktest_u_arr_train_nonoise, rktest_u_arr_test,
                   true_pmap_max, rkTime=rkTime, split=split, params=params)
1438
1439
def get_res_results(run_opts, res_itr, res_gen, rk, noise, noise_stream,
                    rktest_u_arr_train_nonoise, rktest_u_arr_test, rkTime_test, split_test, params,
                    train_seed, true_pmap_max):
    """Generate, train, and test one reservoir over all noise magnitudes and
    regularization values.

    For each noise magnitude in `noise` and each alpha in run_opts.reg_values,
    trains the output weights and evaluates performance, storing all metrics
    in a ResOutput.

    Returns:
        (res_out, train_seed, noise_array, res_itr) where noise_array is
        `noise` promoted to a 1-D numpy array.
    """
    tic = time.perf_counter()
    print('Starting res %d' % res_itr)
    reservoir, noise_in = generate_res(run_opts, res_gen, res_itr, rk, noise_stream, noise)

    toc = time.perf_counter()
    print('Res states found for itr %d, runtime: %f sec.' % (res_itr, toc - tic))

    # Promote a scalar noise magnitude to an array so the loop below is uniform.
    if not isinstance(noise, np.ndarray):
        noise_array = np.array([noise])
    else:
        noise_array = noise
    res_out = ResOutput(run_opts, noise)
    # Loop variable renamed from `noise` so it no longer shadows the parameter.
    for noise_idx, noise_val in enumerate(noise_array):
        noise_tic = time.perf_counter()

        def min_optim_func(alpha, alpha_idx):
            # Train with regularization `alpha` and store the test results at
            # (noise_idx, alpha_idx) inside res_out (mutated in place).
            return optim_func(run_opts, res_out, reservoir, noise_in[noise_idx], noise_val,
                              noise_idx, rktest_u_arr_train_nonoise, rktest_u_arr_test,
                              alpha, alpha_idx, true_pmap_max, rkTime_test, split_test, params)

        for j, alpha_value in enumerate(run_opts.reg_values):
            print('Regularization: ', run_opts.reg_values[j])
            if run_opts.debug_mode:
                # Debug mode: let exceptions propagate for full tracebacks.
                min_optim_func(alpha_value, j)
            else:
                try:
                    min_optim_func(alpha_value, j)
                except Exception as e:
                    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                    # propagate; report the failure and continue with the next alpha.
                    print('Training unsuccessful for alpha:')
                    print(alpha_value)
                    print(e)
        noise_toc = time.perf_counter()
        print('Noise test time: %f sec.' % (noise_toc - noise_tic))
    toc = time.perf_counter()
    runtime = toc - tic
    print('Iteration runtime: %f sec.' % runtime)
    return res_out, train_seed, noise_array, res_itr
1482
1483
def find_stability(run_opts, noise, train_seed, train_gen, res_itr, res_gen, test_stream, test_idxs, noise_stream,
                   overall_idx):
    # Main function for training and testing reservoir performance. This function first generates the training and testing data,
    # then passes to get_res_results to obtain the reservoir performance.
    """Top-level per-reservoir job: build training/test data, train and test the
    reservoir via get_res_results, and save the results to disk.

    Returns (rkTime_test, split_test), the test length and sync split used.
    """
    # Re-imported locally so the ignore filter is applied inside ray workers too.
    import warnings
    warnings.filterwarnings("ignore")
    if isinstance(noise, np.ndarray):
        print_str = 'Noise: ['
        for i in range(noise.size):
            print_str += '%e, ' % noise[i]
        print_str += ']'
        print(print_str)
    else:
        print('Noise: %e' % noise)
    print('Training seed: %d' % train_seed)
    warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
    # Choose the test length and synchronization split; test_time == 0 selects
    # a system-specific default length.
    # NOTE(review): rkTime_test/split_test are undefined if run_opts.system is
    # not 'lorenz', 'KS', or 'KS_d2175'.
    if run_opts.system == 'lorenz':
        if run_opts.test_time == 0:
            rkTime_test = 4000
        else:
            rkTime_test = run_opts.test_time
        split_test = run_opts.sync_time
    elif run_opts.system in ['KS', 'KS_d2175']:
        if run_opts.test_time == 0:
            rkTime_test = int(16000)
        else:
            rkTime_test = int(run_opts.test_time)
        split_test = int(run_opts.sync_time)

    rktest_u_arr_train_nonoise, rktest_u_arr_test, params = get_test_data(
        run_opts, test_stream, overall_idx, rkTime=rkTime_test, split=split_test)
    # np.random.seed(train_seed)
    # Generate the training trajectory from a fresh random initial condition.
    if run_opts.system == 'lorenz':
        ic = train_gen.random(3) * 2 - 1
        rk = NumericalModel(u0 = np.array([ic[0], ic[1], 30 * ic[2]]), tau=run_opts.tau,
                        T=run_opts.train_time + run_opts.discard_time,
                        ttsplit=run_opts.train_time + run_opts.discard_time, system=run_opts.system, params=params)
    elif run_opts.system in ['KS', 'KS_d2175']:
        u0 = 0.6 * (train_gen.random(64) * 2 - 1)
        u0 = u0 - np.mean(u0)
        rk = NumericalModel(tau=run_opts.tau, T=run_opts.train_time + run_opts.discard_time,
                        ttsplit=run_opts.train_time + run_opts.discard_time, u0=u0, system=run_opts.system,
                        params=params)
    if run_opts.pmap:
        # Load the precomputed true Poincare-map maxima for comparison.
        # NOTE(review): plain string concatenation assumes root_folder ends with a
        # path separator — confirm against how run_opts.root_folder is set.
        true_pmap_max_filename = run_opts.root_folder + \
                                 '%s_tau%0.2f_true_pmap_max.csv' % (run_opts.system, run_opts.tau)
        # Work around the Windows MAX_PATH (260 char) limit.
        if os.name == 'nt' and len(true_pmap_max_filename) >= 260:
            true_pmap_max_filename = get_windows_path(true_pmap_max_filename)
        true_pmap_max = np.loadtxt(true_pmap_max_filename, delimiter=',')
        print('Snippet of true poincare map:')
        print(true_pmap_max[:5])
    else:
        # Placeholder so downstream signatures stay uniform when pmap is disabled.
        true_pmap_max = np.zeros(100)

    res_out, train_seed, noise_array, itr = get_res_results(run_opts, res_itr, res_gen, \
                                                            rk, noise, noise_stream, rktest_u_arr_train_nonoise,
                                                            rktest_u_arr_test, \
                                                            rkTime_test, split_test, params, train_seed, true_pmap_max)

    if not isinstance(noise, np.ndarray):
        noise_array = np.array([noise])
    else:
        noise_array = noise

    res_out.save(run_opts, noise_array, res_itr, train_seed, test_idxs)
    # toc_global = time.perf_counter()
    # print('Total Runtime: %s sec.' % (toc_global - tic_global))

    return rkTime_test, split_test
1553
1554
# If we are running reservoir tests in parallel, register find_stability as a Ray
# remote function; otherwise, wrap it so it can be called serially with the same signature.
1557
@ray.remote
def find_stability_remote(*args):
    # Ray-parallel wrapper: runs find_stability as a remote task so many
    # (reservoir, training, noise) combinations can execute concurrently.
    return find_stability(*args)
1561
1562
def find_stability_serial(*args):
    # Serial wrapper: calls find_stability directly in the current process,
    # mirroring the interface of find_stability_remote for non-Ray runs.
    return find_stability(*args)
1565
1566
def start_reservoir_test(argv=None, run_opts=None):
    """Main driver function for obtaining reservoir performance.

    Processes input arguments and creates random number generators for the
    different reservoirs, trainings, noise arrays, and tests. It then calls
    find_stability (serially or via Ray) for every incomplete
    (reservoir, training, noise) combination and saves the output to a folder.

    Parameters
    ----------
    argv : list of str, optional
        Command-line-style arguments used to construct a RunOpts when
        run_opts is not supplied.
    run_opts : RunOpts, optional
        Pre-constructed run options; takes precedence over argv.
    """
    if argv is not None and run_opts is None:
        run_opts = RunOpts(argv)
    print(run_opts.run_folder_name)
    # Initialize Ray either on the local machine or on a cluster head node.
    if run_opts.machine == 'personal':
        if run_opts.ifray:
            ray.init(num_cpus=run_opts.num_cpus)
    elif run_opts.machine == 'deepthought2':
        if run_opts.ifray:
            ray.init(address=os.environ["ip_head"])

    # For these train types the regularization values scale with the number
    # of noise realizations.
    if run_opts.traintype in ['normal', 'normalres1', 'normalres2', 'rmean', 'rmeanres1', 'rmeanres2',
                              'rplusq', 'rplusqres1', 'rplusqres2']:
        run_opts.reg_values = run_opts.reg_values * run_opts.noise_realizations
    if run_opts.reg_train_times is None:
        run_opts.reg_train_times = np.array([run_opts.train_time - run_opts.discard_time])

    # Fixed seed sequences so results are reproducible across separate jobs.
    ss_res = np.random.SeedSequence(12)
    ss_train = np.random.SeedSequence(34)
    ss_test = np.random.SeedSequence(56)
    ss_noise = np.random.SeedSequence(78)
    if run_opts.traintype in ['gradient1', 'gradient2',
                              'gradient12'] or 'gradientk' in run_opts.traintype or 'regzerok' in run_opts.traintype:
        # Gradient/regzero train types do not iterate over noise values, so
        # streams are indexed by (reservoir, training) pairs only.
        res_seeds = ss_res.spawn(run_opts.res_per_test + run_opts.res_start)
        train_seeds = ss_train.spawn(run_opts.num_trains + run_opts.train_start)
        test_seeds = ss_test.spawn(run_opts.num_tests + run_opts.test_start)
        test_idxs = np.arange(run_opts.test_start, run_opts.test_start + run_opts.num_tests)
        res_streams = np.zeros(run_opts.res_per_test * run_opts.num_trains, dtype=object)
        train_streams = np.zeros(run_opts.res_per_test * run_opts.num_trains, dtype=object)
        test_streams = np.zeros((run_opts.res_per_test * run_opts.num_trains, run_opts.num_tests), dtype=object)
        for i in range(run_opts.res_per_test * run_opts.num_trains):
            test_streams[i] = np.array([np.random.default_rng(test_seeds[s]) for \
                                        s in test_idxs], dtype=object)
        noise_streams = np.empty(run_opts.noise_realizations, dtype=object)
        # meshgrid output order: tr holds training indices, rt reservoir indices.
        tr, rt = np.meshgrid(np.arange(run_opts.num_trains), np.arange(run_opts.res_per_test))
        tr = tr.flatten() + run_opts.train_start
        rt = rt.flatten() + run_opts.res_start
        for i in range(run_opts.res_per_test * run_opts.num_trains):
            res_streams[i] = np.random.default_rng(res_seeds[rt[i]])
            train_streams[i] = np.random.default_rng(train_seeds[tr[i]])
        # Only run combinations whose final output file is not yet on disk.
        incomplete_idxs = []
        for i in range(tr.size):
            if not os.path.exists(os.path.join(run_opts.run_folder_name,
                                               'train_max_rms_res%d_train%d_noise%e_regtrain%d.csv' % (
                                                       rt[i], tr[i], run_opts.noise_values_array[-1],
                                                       run_opts.reg_train_times[-1]))):
                incomplete_idxs.append(i)
        print('Total idxs: %d' % tr.size)
        print('Incomplete idxs: %d' % len(incomplete_idxs))

        print('Starting Ray Computation')
        tic = time.perf_counter()
        if run_opts.ifray:
            out_base = ray.get(
                [find_stability_remote.remote(run_opts, run_opts.noise_values_array, tr[i], train_streams[i], \
                                              rt[i], res_streams[i], test_streams[i], test_idxs, noise_streams, i) for i
                 in incomplete_idxs])
        else:
            out_base = [find_stability_serial(run_opts, run_opts.noise_values_array, tr[i], train_streams[i], \
                                              rt[i], res_streams[i], test_streams[i], test_idxs, noise_streams, i) for i
                        in incomplete_idxs]

    else:
        # All other train types iterate over every noise value as well, so
        # streams are indexed by (training, noise, reservoir) triples.
        res_seeds = ss_res.spawn(run_opts.res_per_test + run_opts.res_start)
        train_seeds = ss_train.spawn(run_opts.num_trains + run_opts.train_start)
        test_seeds = ss_test.spawn(run_opts.num_tests + run_opts.test_start)
        test_idxs = np.arange(run_opts.test_start, run_opts.test_start + run_opts.num_tests)
        noise_seeds = ss_noise.spawn(run_opts.noise_realizations)
        res_streams = np.zeros(run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size,
                               dtype=object)
        train_streams = np.zeros(run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size,
                                 dtype=object)
        test_streams = np.zeros(
            (run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size, run_opts.num_tests),
            dtype=object)
        noise_streams = np.zeros((run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size,
                                  run_opts.noise_realizations), dtype=object)

        tnr, ntr, rtn = np.meshgrid(np.arange(run_opts.num_trains), run_opts.noise_values_array,
                                    np.arange(run_opts.res_per_test))
        # np.meshgrid returns grids in (train, noise, reservoir) input order:
        # tnr holds training indices, ntr noise values, rtn reservoir indices.
        # BUG FIX: the start offsets were previously swapped (tnr got
        # res_start and rtn got train_start), which is inconsistent with both
        # the downstream indexing (train_seeds[tnr[i]], res_seeds[rtn[i]])
        # and the gradient branch above.
        tnr = tnr.flatten() + run_opts.train_start
        ntr = ntr.flatten()
        rtn = rtn.flatten() + run_opts.res_start

        for i in range(run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size):
            res_streams[i] = np.random.default_rng(res_seeds[rtn[i]])
            train_streams[i] = np.random.default_rng(train_seeds[tnr[i]])
            test_streams[i] = np.array([np.random.default_rng(test_seeds[j]) for \
                                        j in test_idxs])
            noise_streams[i] = np.array([np.random.default_rng(j) for j in noise_seeds])
        # Only run combinations whose final output file is not yet on disk.
        incomplete_idxs = []
        for i in range(tnr.size):
            if not os.path.exists(os.path.join(run_opts.run_folder_name,
                                               'train_max_rms_res%d_train%d_noise%e_regtrain%d.csv' % (
                                                       rtn[i], tnr[i], ntr[i], run_opts.reg_train_times[-1]))):
                incomplete_idxs.append(i)

        print('Starting Ray Computation')
        tic = time.perf_counter()
        if run_opts.ifray:
            out_base = ray.get([find_stability_remote.remote(run_opts, ntr[i], tnr[i], train_streams[i], \
                                                             rtn[i], res_streams[i], test_streams[i], test_idxs,
                                                             noise_streams[i], i) for i in incomplete_idxs])
        else:
            out_base = [find_stability_serial(run_opts, ntr[i], tnr[i], train_streams[i], rtn[i], \
                                              res_streams[i], test_streams[i], test_idxs, noise_streams[i], i) for i in
                        incomplete_idxs]
    if len(incomplete_idxs) != 0:
        toc = time.perf_counter()
        runtime = toc - tic
        print('Runtime over all cores: %f sec.' % (runtime))
        print('Ray finished.')
        print('Results Saved')
    else:
        print('No incomplete runs were found. Ending job.')

    if run_opts.ifray:
        ray.shutdown()
1690
1691
def main(argv):
    # Thin CLI entry point: forward the command-line arguments to the driver.
    start_reservoir_test(argv)


if __name__ == "__main__":
    # Strip the program name and pass the remaining CLI args through.
    main(sys.argv[1:])
@njit
def str_to_int(s):
    # Parse a decimal-digit string into an int inside numba-compiled code,
    # where the built-in int(str) conversion is unavailable.
    value = 0
    for ch in s:
        # Horner's rule: shift the accumulated digits left, append the new one.
        value = value * 10 + (ord(ch) - 48)
    return value
@njit(fastmath=True)
def mean_numba_axis1(mat):
    # Row-wise mean (equivalent to mat.mean(axis=1)) for numba-compiled code,
    # where the axis keyword of np.mean is not supported.
    n_rows = mat.shape[0]
    out = np.empty(n_rows)
    for row in range(n_rows):
        out[row] = np.mean(mat[row])
    return out
@njit(fastmath=True)
def sum_numba_axis0(mat):
    # Column-wise sum (equivalent to mat.sum(axis=0)) for numba-compiled code,
    # where the axis keyword of np.sum is not supported.
    n_cols = mat.shape[1]
    out = np.empty(n_cols)
    for col in range(n_cols):
        out[col] = np.sum(mat[:, col])
    return out
@njit(fastmath=True)
def numba_var_axis0(pred):
    # Population variance (ddof=0) along axis 0, equivalent to pred.var(axis=0),
    # built from sum_numba_axis0 since np.var's axis keyword is unsupported in numba.
    n_samples = pred.shape[0]
    col_means = sum_numba_axis0(pred) / n_samples
    deviations_sq = (pred - col_means) ** 2.0
    return sum_numba_axis0(deviations_sq) / n_samples
@njit(fastmath=True)
def wasserstein_distance_empirical(measured_samples, true_samples):
    # Computes the wasserstein distance between the empirical CDFs of the two input sets of samples.
    # Faster than scipy when compiled.
    # NOTE: both input arrays are sorted in place, so the caller's arrays are mutated.
    # Returns NaN (or inf) immediately if measured_samples contains NaN (or inf).
    if np.any(np.isnan(measured_samples)):
        return np.nan  # fix: the np.NAN alias was removed in NumPy 2.0; np.nan is canonical
    if np.any(np.isinf(measured_samples)):
        return np.inf
    measured_samples.sort()
    true_samples.sort()
    n, m, n_inv, m_inv = (measured_samples.size, true_samples.size,
                          1 / measured_samples.size, 1 / true_samples.size)
    n_itr = 0
    m_itr = 0
    measured_cdf = 0
    true_cdf = 0
    wass_dist = 0
    # Consume the smallest sample(s) first to initialize prev_sample and the CDFs.
    if measured_samples[n_itr] < true_samples[m_itr]:
        prev_sample = measured_samples[n_itr]
        measured_cdf += n_inv
        n_itr += 1
    elif true_samples[m_itr] < measured_samples[n_itr]:
        prev_sample = true_samples[m_itr]
        true_cdf += m_inv
        m_itr += 1
    else:
        # Tie: both CDFs step at the same sample value.
        prev_sample = true_samples[m_itr]
        measured_cdf += n_inv
        true_cdf += m_inv
        n_itr += 1
        m_itr += 1
    # Merge the two sorted arrays, accumulating |F_measured - F_true| * interval width.
    while n_itr < n and m_itr < m:
        if measured_samples[n_itr] < true_samples[m_itr]:
            wass_dist += np.abs((measured_cdf - true_cdf)
                                * (measured_samples[n_itr] - prev_sample))
            prev_sample = measured_samples[n_itr]
            measured_cdf += n_inv
            n_itr += 1
        elif true_samples[m_itr] < measured_samples[n_itr]:
            wass_dist += np.abs((measured_cdf - true_cdf)
                                * (true_samples[m_itr] - prev_sample))
            prev_sample = true_samples[m_itr]
            true_cdf += m_inv
            m_itr += 1
        else:
            # Tie: advance both CDFs over a single interval.
            wass_dist += np.abs((measured_cdf - true_cdf)
                                * (true_samples[m_itr] - prev_sample))
            prev_sample = true_samples[m_itr]
            measured_cdf += n_inv
            true_cdf += m_inv
            n_itr += 1
            m_itr += 1
    # Drain whichever array still has samples; the exhausted array's CDF is 1.
    if n_itr == n:
        for itr in range(m_itr, m):
            wass_dist += np.abs((1.0 - true_cdf) *
                                (true_samples[itr] - prev_sample))
            prev_sample = true_samples[itr]
            true_cdf += m_inv
    else:
        for itr in range(n_itr, n):
            wass_dist += np.abs((measured_cdf - 1.0) *
                                (measured_samples[itr] - prev_sample))
            prev_sample = measured_samples[itr]
            measured_cdf += n_inv

    return wass_dist
@jit(fastmath=True, nopython=True)
def numba_eigsh(A):
    # Computes 6 eigenvalues of the symmetric matrix A from within numba-compiled
    # code. scipy's eigsh is not numba-compatible, so it is invoked through
    # objmode (an escape hatch back into the Python interpreter).
    np.random.seed(0)  # fixed seed so the Lanczos starting vector v0 is deterministic
    v0 = np.random.rand(A.shape[0])
    with objmode(eigs_out='double[:]'):
        # k=6 eigenvalues, no eigenvectors; v0 pins the iteration's start point.
        eigs_out = eigsh(A, k=6, v0=v0, maxiter=1e5, return_eigenvectors=False)
    return eigs_out
@jit(nopython=True, fastmath=True)
def numerical_model_wrapped(h=0.01, tau=0.1, T=300, ttsplit=5000, u0=0, system='lorenz',
                        params=np.array([[], []], dtype=np.complex128)):
    # Numba function for obtaining training and testing dynamical system time series data.
    # Integrates the requested system from u0, rescales each output coordinate
    # by fixed constants (presumably the long-run mean/std of each coordinate
    # on the attractor -- TODO confirm), and splits the series at ttsplit.
    if system == 'lorenz':
        int_step = int(tau / h)  # internal integration steps per output interval tau
        u_arr = np.ascontiguousarray(lorenzrungekutta(u0, T, tau, int_step))

        # Rescale each Lorenz coordinate: (u - offset) / scale.
        u_arr[0] = (u_arr[0] - 0) / 7.929788629895004
        u_arr[1] = (u_arr[1] - 0) / 8.9932616136662
        u_arr[2] = (u_arr[2] - 23.596294463016896) / 8.575917849311919
        new_params = params
    elif system == 'KS':
        u_arr, new_params = kursiv_predict(u0, tau=tau, T=T, params=params)
        u_arr = np.ascontiguousarray(u_arr) / (1.1876770355823614)
    elif system == 'KS_d2175':
        # Kuramoto-Sivashinsky variant with domain size d = 21.75.
        u_arr, new_params = kursiv_predict(u0, tau=tau, T=T, d=21.75, params=params)
        u_arr = np.ascontiguousarray(u_arr) / (1.2146066380280796)
    else:
        raise ValueError

    u_arr_train = u_arr[:, :ttsplit + 1]
    # u[ttsplit], the (ttsplit+1)st element, is the last in u_arr_train and the first in u_arr_test
    u_arr_test = u_arr[:, ttsplit:]
    return u_arr_train, u_arr_test, ttsplit, new_params
@jit(nopython=True, fastmath=True)
def numerical_model_wrapped_pred(h=0.01, tau=0.1, T=300, ttsplit=5000, u0_array=np.array([[], []], dtype=np.complex128),
                             system='lorenz', params=np.array([[], []], dtype=np.complex128)):
    # Numba function for obtaining training and testing dynamical system time series data for a set of initial conditions.
    # This is used during test to compute the map error instead of a for loop over the entire prediction period.
    if system == 'lorenz':
        int_step = int(tau / h)  # internal integration steps per output interval tau
        u_arr = np.ascontiguousarray(
            lorenzrungekutta_pred(u0_array, tau, int_step))

        # Rescale each coordinate with the same fixed constants used in
        # numerical_model_wrapped (presumably attractor mean/std -- TODO confirm).
        u_arr[0] = (u_arr[0] - 0) / 7.929788629895004
        u_arr[1] = (u_arr[1] - 0) / 8.9932616136662
        u_arr[2] = (u_arr[2] - 23.596294463016896) / 8.575917849311919
        new_params = params
    elif system == 'KS':
        u_arr, new_params = kursiv_predict_pred(
            u0_array, tau=tau, T=T, params=params)
        u_arr = np.ascontiguousarray(u_arr) / (1.1876770355823614)
    elif system == 'KS_d2175':
        # Kuramoto-Sivashinsky variant with domain size d = 21.75.
        u_arr, new_params = kursiv_predict_pred(u0_array, tau=tau, T=T, params=params, d=21.75)
        u_arr = np.ascontiguousarray(u_arr) / (1.2146066380280796)
    else:
        raise ValueError

    u_arr_train = u_arr[:, :ttsplit + 1]
    # u[ttsplit], the (ttsplit+1)st element, is the last in u_arr_train and the first in u_arr_test
    u_arr_test = u_arr[:, ttsplit:]
    return u_arr_train, u_arr_test, ttsplit, new_params
def getX(res, rk, x0=1, y0=1, z0=1):
    """Obtain reservoir states in the Python interpreter via get_X_wrapped.

    x0, y0, z0 are unused and retained only for backward compatibility.

    Bug fix: the previous version omitted the required positional `noise`
    argument of get_X_wrapped (raising a TypeError on every call) and stored
    the returned (states, training input) tuple in res.X instead of just the
    states. With the default noisetype 'none', the noise array is never read,
    so a zero array of matching shape is passed.
    """
    u_training = rk.u_arr_train
    res.X, _ = get_X_wrapped(np.ascontiguousarray(u_training), res.X, res.Win_data, res.Win_indices, res.Win_indptr,
                             res.Win_shape,
                             res.W_data, res.W_indices, res.W_indptr, res.W_shape, res.leakage,
                             np.zeros_like(u_training))

    return res.X
@jit(nopython=True, fastmath=True)
def get_X_wrapped(u_training, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
                  leakage, noise, noisetype='none', noise_scaling=0, noise_realization=0, traintype='normal'):
    # Numba compatible function for obtaining reservoir states using various types of noise.
    # Generally returns an array of reservoir states and the noiseless training data used as input.
    # If traintype is gradient, this function instead returns the resevoir states and the reservoir states derivatives.
    #
    # The reservoir update used throughout is the leaky tanh map
    #   r[i+1] = (1 - leakage) * r[i] + leakage * tanh(Win @ [1; u[i]] + W @ r[i])
    # with Win and W in compressed sparse form applied via mult_vec. The three
    # traintype families differ only in where noise enters:
    #   * 'normal'/'rmean'/...: noise added to the input u[i];
    #   * '...res1'           : noise added to the state passed to W;
    #   * '...res2'           : noise added to the pre-activation sum inside tanh.

    if noisetype in ['gaussian', 'perturbation']:
        # Noise injected at every training step.
        if traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq'] or 'confined' in traintype:
            # Input noise: u[i] + noise[:, i] feeds the reservoir.
            # noise = gen_noise(u_training.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i] + noise[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape,
                                                                        res_X[:, i]))
            u_training_wnoise = u_training + noise
        elif traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
            # State noise (res1): noise added to r[i] before the W product.
            # noise = gen_noise(res_X.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape,
                                                          res_X[:, i] + noise[:, i]))
            u_training_wnoise = u_training
        elif traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
            # Pre-activation noise (res2): noise added after the matrix products, inside tanh.
            # noise = gen_noise(res_X.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, res_X[:, i]) + noise[:,
                                                                                                               i])
            u_training_wnoise = u_training
        else:
            raise ValueError
        return res_X, u_training_wnoise
    elif noisetype not in ['gaussian_onestep', 'perturbation_onestep'] and \
            ('gaussian' in noisetype and 'step' in noisetype):
        # Multi-step noise ('gaussian<k>step'): each stored state is recomputed
        # by restarting from the noiseless trajectory k steps earlier and
        # applying noise on each of those k steps.
        noise_steps = str_to_int(noisetype.replace('gaussian', '').replace(
            'perturbation', '').replace('step', ''))
        # First build the reference trajectory without any noise.
        res_X_nonoise = np.copy(res_X)
        for i in range(0, u_training[0].size):
            res_X_nonoise[:, i + 1] = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                    1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, res_X_nonoise[:, i]))
        if traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq']:
            # Input noise on each of the noise_steps propagation steps.
            # noise = gen_noise(u_training.shape[0], u_training.shape[1], str(noisetype), noise_scaling, noise_gen, noise_realization)
            for i in range(0, u_training[0].size - noise_steps):
                temp_x = res_X_nonoise[:, i]
                temp_x = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i] + noise[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
                for k in range(1, noise_steps):
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                            1., u_training[:, i + k] + noise[:, i + k])) + mult_vec(W_data, W_indices, W_indptr,
                                                                                    W_shape, temp_x))
                res_X[:, i + noise_steps] = temp_x
            u_training_wnoise = u_training + noise
        elif traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
            # State noise (res1) on each propagation step.
            # noise = gen_noise(res_X.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            for i in range(0, u_training[0].size - noise_steps):
                temp_x = res_X_nonoise[:, i]
                temp_x = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x + noise[:, i]))
                for k in range(1, noise_steps):
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                            1., u_training[:, i + k])) + mult_vec(W_data, W_indices, W_indptr, W_shape,
                                                                  temp_x + noise[:, i + k]))
                res_X[:, i + noise_steps] = temp_x
            u_training_wnoise = u_training
        elif traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
            # Pre-activation noise (res2) on each propagation step.
            # noise = gen_noise(res_X.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            for i in range(0, u_training[0].size - noise_steps):
                temp_x = res_X_nonoise[:, i]
                temp_x = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x) + noise[:, i])
                for k in range(1, noise_steps):
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                            1., u_training[:, i + k])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x) + noise[
                                                                                                                  :,
                                                                                                                  i + k])
                res_X[:, i + noise_steps] = temp_x
            u_training_wnoise = u_training
        else:
            raise ValueError
        return res_X, u_training_wnoise
    elif noisetype in ['gaussian_onestep', 'perturbation_onestep']:
        # One-step noise: the stored state sees noise at the current step only;
        # temp_x carries the noiseless (and non-leaky) propagation forward.
        if traintype in ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq']:
            # noise = gen_noise(u_training.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            temp_x = res_X[:, 0]
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * temp_x + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i] + noise[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
                temp_x = np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(1., u_training[:, i])) + mult_vec(
                        W_data, W_indices, W_indptr, W_shape, temp_x))
            u_training_wnoise = u_training + noise
        elif traintype in ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1', 'rqmeanres1', 'rqres1']:
            # noise = gen_noise(res_X.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            temp_x = res_X[:, 0]
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * temp_x + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x + noise[:, i]))
                temp_x = np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(1., u_training[:, i])) + mult_vec(
                        W_data, W_indices, W_indptr, W_shape, temp_x))
            u_training_wnoise = u_training
        elif traintype in ['normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2', 'rqmeanres2', 'rqres2']:
            # noise = gen_noise(res_X.shape[0], u_training.shape[1], noisetype, noise_scaling, noise_gen, noise_realization)
            temp_x = res_X[:, 0]
            for i in range(0, u_training[0].size):
                res_X[:, i + 1] = (1.0 - leakage) * temp_x + leakage * np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                        1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x) + noise[:, i])
                temp_x = np.tanh(
                    mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(1., u_training[:, i])) + mult_vec(
                        W_data, W_indices, W_indptr, W_shape, temp_x))
            u_training_wnoise = u_training
        else:
            raise ValueError
        return res_X, u_training_wnoise
    elif 'gaussian' in noisetype:
        # 'gaussian<k>' (no 'step' suffix): like the multi-step case, but input
        # noise is applied only on the first of the k propagation steps.
        noise_steps = str_to_int(noisetype.replace('gaussian', ''))

        # noise = gen_noise(u_training.shape[0], u_training.shape[1], str(noisetype), noise_scaling, noise_gen, noise_realization)
        # Noiseless reference trajectory first.
        res_X_nonoise = np.copy(res_X)
        for i in range(0, u_training[0].size):
            res_X_nonoise[:, i + 1] = (1.0 - leakage) * res_X_nonoise[:, i] + leakage * np.tanh(
                mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                    1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, res_X_nonoise[:, i]))
        for i in range(0, u_training[0].size - noise_steps):
            temp_x = res_X_nonoise[:, i]
            for k in range(noise_steps):
                if k == 0:
                    # Noise enters only on the first step of the window.
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                            1., u_training[:, i + k] + noise[:, i + k])) + mult_vec(W_data, W_indices, W_indptr,
                                                                                    W_shape, temp_x))
                else:
                    temp_x = (1.0 - leakage) * temp_x + leakage * np.tanh(
                        mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                            1., u_training[:, i + k])) + mult_vec(W_data, W_indices, W_indptr, W_shape, temp_x))
            res_X[:, i + noise_steps] = temp_x
        u_training_wnoise = u_training + noise
        return res_X, u_training_wnoise

    elif traintype in ['sylvester_wD'] or 'gradient' in traintype or 'regzero' in traintype:
        # No noise; gradient-based traintypes additionally need the state
        # derivative res_D = leakage * sech^2(pre-activation), i.e. the
        # derivative of the tanh update with respect to its argument.
        rsvr_size = res_X.shape[0]
        res_D = np.zeros((rsvr_size, u_training.shape[1] + 1))
        for i in range(0, u_training[0].size):
            res_internal = mult_vec(Win_data, Win_indices, Win_indptr, Win_shape,
                                    np.append(1., u_training[:, i])) + mult_vec(
                W_data, W_indices, W_indptr, W_shape, res_X[:, i])
            res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + \
                              leakage * np.tanh(res_internal)
            res_D[:, i + 1] = leakage / (np.power(np.cosh(res_internal), 2.0))

        return res_X, res_D
    else:
        # Default: plain noiseless reservoir propagation.
        for i in range(0, u_training[0].size):
            res_X[:, i + 1] = (1.0 - leakage) * res_X[:, i] + leakage * np.tanh(
                mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(
                    1., u_training[:, i])) + mult_vec(W_data, W_indices, W_indptr, W_shape, res_X[:, i]))
        return res_X, u_training
def gen_noise_driver(run_opts, data_shape, res_shape, noise_scaling, noise_stream):
    """Generate the array of noise states for the configured noise/training type.

    Args:
        run_opts: Run options object; reads noisetype, traintype, and
            noise_realizations.
        data_shape: (input dimension, number of time steps) of the training data.
        res_shape: (reservoir size, ...) — first entry sets the noise size for
            the reservoir-noise ('...res1'/'...res2') training types.
        noise_scaling: Noise variance passed through to gen_noise.
        noise_stream: Array/sequence of random number generators, one per
            noise realization.

    Returns:
        Noise array of shape (realizations, size, length); a single all-zero
        realization for 'gradient*' and 'none' noise types.

    Raises:
        ValueError: If the noisetype/traintype combination is unsupported.
    """
    nt = run_opts.noisetype
    tt = run_opts.traintype
    # Training types whose noise is added to the input data vs. the reservoir state.
    data_traintypes = ['normal', 'rmean', 'rplusq', 'rmeanq', 'rqmean', 'rq']
    res_traintypes = ['normalres1', 'rmeanres1', 'rplusqres1', 'rmeanqres1',
                      'rqmeanres1', 'rqres1',
                      'normalres2', 'rmeanres2', 'rplusqres2', 'rmeanqres2',
                      'rqmeanres2', 'rqres2']
    basic = nt in ['gaussian', 'perturbation']
    # Multi-step gaussian noise ('gaussian...step...') and the one-step variants
    # share the same traintype dispatch as the basic types.
    stepped = nt in ['gaussian_onestep', 'perturbation_onestep'] or \
        ('gaussian' in nt and 'step' in nt)
    if basic or stepped:
        # 'confined' training types are only valid with plain gaussian/perturbation
        # noise, matching the original branch structure.
        if tt in data_traintypes or (basic and 'confined' in tt):
            noise_size = data_shape[0]
        elif tt in res_traintypes:
            noise_size = res_shape[0]
        else:
            raise ValueError
        noise = gen_noise(noise_size, data_shape[1], nt, noise_scaling,
                          noise_stream, run_opts.noise_realizations)
    elif 'gaussian' in nt:
        # Any other gaussian variant: noise on the input data regardless of traintype.
        noise = gen_noise(data_shape[0], data_shape[1], nt, noise_scaling,
                          noise_stream, run_opts.noise_realizations)
    elif 'gradient' in nt or nt == 'none':
        # Gradient regularization and noiseless training use a single zero "noise"
        # realization so downstream code can index noise[0] uniformly.
        noise = np.zeros((1, data_shape[0], data_shape[1]))
    else:
        raise ValueError
    return noise
def gen_noise(noise_size, noise_length, noisetype, noise_scaling, noise_stream, noise_realizations):
    """Generate an array of noise vectors.

    Args:
        noise_size: Dimension of each noise vector (rows per realization).
        noise_length: Number of time steps (columns per realization).
        noisetype: Noise type; 'gaussian*' draws scaled normal samples,
            'perturbation'/'perturbation_onestep' sets +/- unit perturbations.
        noise_scaling: Noise variance; amplitude is sqrt(noise_scaling).
        noise_stream: Sequence of numpy random generators, one per realization
            (only used for gaussian noise).
        noise_realizations: Number of noise realizations to generate.

    Returns:
        Array of shape (noise_realizations, noise_size, noise_length).

    Raises:
        ValueError: For perturbation noise when a realization index reaches
            2 * noise_size (only +/- perturbations along each dimension exist).
    """
    noise = np.zeros((noise_realizations, noise_size, noise_length))
    if 'gaussian' in noisetype:
        for i in range(noise_realizations):
            # Scale standard-normal draws so noise_scaling is the variance.
            noise[i] = noise_stream[i].standard_normal(
                (noise_size, noise_length)) * np.sqrt(noise_scaling)
    if noisetype in ['perturbation', 'perturbation_onestep']:
        # Bug fix: the original iterated `for noise_realization in noise_realizations`
        # (over an int, a TypeError) and bounded the negative branch by
        # 2 * noise_length instead of 2 * noise_size.
        for noise_realization in range(noise_realizations):
            if noise_realization < noise_size:
                # Positive unit perturbation along dimension noise_realization.
                noise[noise_realization, noise_realization] = np.ones(
                    noise_length) * np.sqrt(noise_scaling)
            elif noise_realization < 2 * noise_size:
                # Negative unit perturbation along the matching dimension.
                noise[noise_realization, noise_realization -
                      noise_size] = -np.ones(noise_length) * np.sqrt(noise_scaling)
            else:
                raise ValueError

    return noise
def get_states(run_opts, res, rk, noise, noise_scaling=0):
    """Obtain the matrices used to train the reservoir.

    Dispatches on run_opts.traintype to the numba-compatible workers: 'getD'
    returns the Jacobian diagonal directly; all other training types store the
    regression matrices on the Reservoir object, with the fifth output going to
    res.left_mat for Sylvester training and res.gradient_reg otherwise.
    """
    u_train = np.ascontiguousarray(rk.u_arr_train)
    if run_opts.traintype == 'getD':
        return getD(u_train, res.X, res.Win_data, res.Win_indices, res.Win_indptr,
                    res.Win_shape, res.W_data, res.W_indices, res.W_indptr,
                    res.W_shape, res.leakage, run_opts.discard_time, noise,
                    run_opts.noisetype, noise_scaling, run_opts.noise_realizations,
                    run_opts.traintype, run_opts.squarenodes)
    results = get_states_wrapped(
        u_train, run_opts.reg_train_times, res.X, res.Win_data, res.Win_indices,
        res.Win_indptr, res.Win_shape, res.W_data, res.W_indices, res.W_indptr,
        res.W_shape, res.leakage, run_opts.discard_time, noise, run_opts.noisetype,
        noise_scaling, run_opts.noise_realizations, run_opts.traintype,
        run_opts.squarenodes)
    res.data_trstates, res.states_trstates, res.Y_train, res.X_train = results[:4]
    if run_opts.traintype in ('sylvester', 'sylvester_wD'):
        # Sylvester training additionally produces the left-hand matrix.
        res.left_mat = results[4]
    else:
        res.gradient_reg = results[4]
@jit(nopython=True, fastmath=True)
def get_squared(X, rsvr_size, squarenodes, dim=0):
    # Return a copy of the feature matrix X, optionally augmented with the
    # element-wise squares of the reservoir-node rows. Row 0 is the bias row,
    # rows 1..rsvr_size are reservoir nodes, and the remaining rows are inputs.
    base = np.copy(X)
    if not squarenodes:
        return base
    bias_row = base[0].reshape(1, -1)
    node_rows = base[1:rsvr_size + 1]
    return np.vstack((bias_row, node_rows, node_rows ** 2.0, base[rsvr_size + 1:]))
@jit(nopython=True, fastmath=True)
def get_squared_vec(X, rsvr_size, squarenodes):
    # Vector analogue of get_squared: optionally insert the squared reservoir
    # node entries between the node entries and the trailing input entries.
    vec = np.copy(X)
    if not squarenodes:
        return vec
    node_vals = vec[1:rsvr_size + 1]
    return np.concatenate(
        (np.array([vec[0]]), node_vals, node_vals ** 2.0, vec[rsvr_size + 1:]))
@jit(nopython=True, fastmath=True)
def get_states_wrapped( u_arr_train, reg_train_times, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, leakage, skip, noise, noisetype='none', noise_scaling=0, noise_realizations=1, traintype='normal', squarenodes=False, q=0):
 490@jit(nopython=True, fastmath=True)
 491def get_states_wrapped(u_arr_train, reg_train_times, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data,
 492                       W_indices, W_indptr, W_shape, leakage, skip, noise, noisetype='none',
 493                       noise_scaling=0, noise_realizations=1, traintype='normal', squarenodes=False, q=0):
 494    # Numba compatible function to obtain the matrices used to train the reservoir using either linear regression or a sylvester equation.
 495    # The type of matrices depends on the traintype, number of noise realizations, and noisetype
 496    res_X = np.ascontiguousarray(res_X)
 497    u_arr_train = np.ascontiguousarray(u_arr_train)
 498    n, d = u_arr_train.shape
 499    rsvr_size, res_d = res_X.shape
 500    if squarenodes:
 501        res_feature_size = 2 * rsvr_size
 502    else:
 503        res_feature_size = rsvr_size
 504    data_trstates = np.zeros((n, res_feature_size + n + 1))
 505    states_trstates = np.zeros((reg_train_times.size, res_feature_size + n + 1, res_feature_size + n + 1))
 506    gradient_reg = np.zeros((reg_train_times.size, res_feature_size + n + 1, res_feature_size + n + 1))
 507    Y_train = np.ascontiguousarray(u_arr_train[:, skip:-1])
 508    print('Reg train times:')
 509    print(reg_train_times)
 510    reg_train_fracs = 1.0 / reg_train_times
 511    print('Reg train fracs:')
 512    print(reg_train_fracs)
 513    if traintype in ['normal', 'normalres1', 'normalres2']:
 514        # Normal multi-noise training that sums all reservoir state outer products
 515        for i in range(noise_realizations):
 516            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 517                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 518                                                 noise_scaling, i, traintype)
 519            X = X[:, skip:(res_d - 2)]
 520            X_train = np.ascontiguousarray(np.concatenate(
 521                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 522            X_train = get_squared(X_train, rsvr_size, squarenodes)
 523            data_trstates += Y_train @ X_train.T
 524            states_trstates[0] += X_train @ X_train.T
 525        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 526    elif traintype in ['rmeanq', 'rmeanqres1', 'rmeanqres2']:
 527        # Training using the mean and the rescaled sum of the perturbations from the mean
 528        for i in range(noise_realizations):
 529            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 530                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 531                                                 noise_scaling, i, traintype)
 532            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 533            X_train = np.ascontiguousarray(np.concatenate(
 534                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 535            X_train = get_squared(X_train, rsvr_size, squarenodes)
 536            if i == 0:
 537                X_train_mean = np.zeros(X_train.shape)
 538                X_all = np.zeros((X_train.shape[0], X_train.shape[1], noise_realizations))
 539            X_train_mean += X_train / noise_realizations
 540            X_all[:, :, i] = X_train
 541        data_trstates = Y_train @ X_train_mean.T
 542        states_trstates[0] = X_train_mean @ X_train_mean.T
 543        for i in range(1, reg_train_times.size):
 544            states_trstates[i] = np.copy(states_trstates[0])
 545        for (j, reg_train_time), prev_reg_train_time in zip(enumerate(reg_train_times),
 546                                                            np.append(np.zeros(1, dtype=np.int64),
 547                                                                      reg_train_times[:-1])):
 548            for i in range(noise_realizations):
 549                Q_fit = X_all[:, prev_reg_train_time:reg_train_time, i] - \
 550                        X_train_mean[:, prev_reg_train_time:reg_train_time]
 551                Q_fit_2 = Q_fit @ Q_fit.T
 552                for k in range(j, reg_train_times.size):
 553                    states_trstates[k] += Q_fit_2 * reg_train_fracs[k] / noise_realizations
 554        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 555    elif traintype in ['rmean', 'rmeanres1', 'rmeanres2']:
 556        # Training using the mean only
 557        for i in range(noise_realizations):
 558            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 559                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 560                                                 noise_scaling, i, traintype)
 561            X = X[:, skip:(res_d - 2)]
 562            X_train = np.ascontiguousarray(np.concatenate(
 563                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 564            X_train = get_squared(X_train, rsvr_size, squarenodes)
 565            if i == 0:
 566                X_train_mean = np.zeros(X_train.shape)
 567            X_train_mean += X_train / noise_realizations
 568        data_trstates = Y_train @ X_train_mean.T
 569        states_trstates[0] = X_train_mean @ X_train_mean.T
 570        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 571    elif traintype in ['rqmean', 'rqmeanres1', 'rqmeanres2']:
 572        # Training using the noiseless reservoir and the rescaled perturbations from the mean
 573        X_0, u_arr_train_noise = get_X_wrapped(
 574            u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
 575            leakage, noise[0])
 576        X_0 = np.ascontiguousarray(X_0[:, skip:(res_d - 2)])
 577        X_train_0 = np.ascontiguousarray(np.concatenate(
 578            (np.ones((1, d - (skip + 1))), X_0, u_arr_train[:, skip - 1:-2]), axis=0))
 579        X_train_0 = get_squared(X_train_0, rsvr_size, squarenodes)
 580        X_all = np.zeros(
 581            (X_train_0.shape[0], X_train_0.shape[1], noise_realizations))
 582        for i in range(noise_realizations):
 583            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 584                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 585                                                 noise_scaling, i, traintype)
 586            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 587            X_train = np.ascontiguousarray(np.concatenate(
 588                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 589            X_train = get_squared(X_train, rsvr_size, squarenodes)
 590            if i == 0:
 591                X_train_mean = np.zeros(X_train.shape)
 592            X_train_mean += X_train / noise_realizations
 593            X_all[:, :, i] = X_train
 594        data_trstates = Y_train @ X_train_0.T
 595        states_trstates[0] = X_train_0 @ X_train_0.T
 596        for i in range(1, reg_train_times.size):
 597            states_trstates[i] = np.copy(states_trstates[0])
 598        prev_reg_train_time = 0
 599        for j, reg_train_time in enumerate(reg_train_times):
 600            for i in range(noise_realizations):
 601                Q_fit = X_all[:, prev_reg_train_time:reg_train_time, i] - \
 602                        X_train_mean[:, prev_reg_train_time:reg_train_time]
 603                for k in range(j, reg_train_times.size):
 604                    states_trstates[k] += (Q_fit @ Q_fit.T) * reg_train_fracs[k] / noise_realizations
 605            prev_reg_train_time = reg_train_time
 606        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 607    elif traintype in ['rq', 'rqres1', 'rqres2']:
 608        # Training using the noiseless reservoir and the rescaled perturbations from the noiseless reservoir
 609        X_0, u_arr_train_noise = get_X_wrapped(
 610            u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
 611            leakage, noise[0])
 612        X_0 = np.ascontiguousarray(X_0[:, skip:(res_d - 2)])
 613        X_train_0 = np.ascontiguousarray(np.concatenate(
 614            (np.ones((1, d - (skip + 1))), X_0, u_arr_train[:, skip - 1:-2]), axis=0))
 615        X_train_0 = get_squared(X_train_0, rsvr_size, squarenodes)
 616        X_all = np.zeros(
 617            (X_train_0.shape[0], X_train_0.shape[1], noise_realizations))
 618        for i in range(noise_realizations):
 619            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 620                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 621                                                 noise_scaling, i, traintype)
 622            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 623            X_train = np.ascontiguousarray(np.concatenate(
 624                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 625            X_train = get_squared(X_train, rsvr_size, squarenodes)
 626            X_all[:, :, i] = X_train
 627        data_trstates = Y_train @ X_train_0.T
 628        states_trstates[0] = X_train_0 @ X_train_0.T
 629        for i in range(1, reg_train_times.size):
 630            states_trstates[i] = np.copy(states_trstates[0])
 631        prev_reg_train_time = 0
 632        for j, reg_train_time in enumerate(reg_train_times):
 633            for i in range(noise_realizations):
 634                Q_fit = X_all[:, prev_reg_train_time:reg_train_time, i] - \
 635                        X_train_0[:, prev_reg_train_time:reg_train_time]
 636                for k in range(j, reg_train_times.size):
 637                    states_trstates[k] += (Q_fit @ Q_fit.T) * reg_train_fracs[k] / noise_realizations
 638            prev_reg_train_time = reg_train_time
 639        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 640    elif traintype in ['rplusq', 'rplusqres1', 'rplusqres2']:
 641        # Training using the sum of the outer products of the sum of the noiseless reservoir and
 642        # the perturbation from the mean
 643        X_0, u_arr_train_noise = get_X_wrapped(
 644            u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
 645            leakage, noise[0])
 646        X_0 = np.ascontiguousarray(X_0[:, skip:(res_d - 2)])
 647        X_train_0 = np.ascontiguousarray(np.concatenate(
 648            (np.ones((1, d - (skip + 1))), X_0, u_arr_train[:, skip - 1:-2]), axis=0))
 649        X_train_0 = get_squared(X_train_0, rsvr_size, squarenodes)
 650        X_all = np.zeros(
 651            (X_train_0.shape[0], X_train_0.shape[1], noise_realizations))
 652        for i in range(noise_realizations):
 653            X, u_arr_train_noise = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 654                                                 W_data, W_indices, W_indptr, W_shape, leakage, noise[i], noisetype,
 655                                                 noise_scaling, i, traintype)
 656            X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 657            X_train = np.ascontiguousarray(np.concatenate(
 658                (np.ones((1, d - (skip + 1))), X, u_arr_train_noise[:, skip - 1:-2]), axis=0))
 659            X_train = get_squared(X_train, rsvr_size, squarenodes)
 660            if i == 0:
 661                X_train_mean = np.zeros(X_train.shape)
 662            X_train_mean += X_train / noise_realizations
 663            X_all[:, :, i] = X_train
 664        Y_fit = Y_train
 665        X_fit = X_all[:, :, 0] - X_train_mean + X_train_0
 666        for i in range(1, noise_realizations):
 667            Y_fit = np.append(Y_fit, Y_train, axis=1)
 668            X_fit = np.append(
 669                X_fit, X_all[:, :, i] - X_train_mean + X_train_0, axis=1)
 670        data_trstates = Y_fit @ X_fit.T
 671        states_trstates[0] = X_fit @ X_fit.T
 672        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 673    elif 'gradientk' in traintype and 'only' not in traintype:
 674        # Linearized k-step noise
 675        k = str_to_int(traintype.replace('gradientk', ''))
 676        reg_train_fracs = 1.0 / (reg_train_times - (k - 1))
 677        sparse_cutoff = 0.89
 678        break_flag = False
 679        X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
 680                             W_indptr,
 681                             W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 682        X = X[:, skip:(res_d - 2)]
 683        D = D[:, skip:(res_d - 2)]
 684        X_train = np.concatenate(
 685            (np.ones((1, d - (skip + 1))), X, u_arr_train[:, skip - 1:-2]), axis=0)
 686        X_train = get_squared(X_train, rsvr_size, squarenodes)
 687        gradient_reg_base = np.zeros((res_feature_size + n + 1, res_feature_size + n + 1))
 688        Win_nobias_data, Win_nobias_indices, Win_nobias_indptr, Win_nobias_shape = \
 689            get_Win_nobias(Win_data, Win_indices, Win_indptr, Win_shape)
 690        D_n_datas = List()
 691        D_n_indices = List()
 692        D_n_indptrs = List()
 693        D_n_shape = np.array([res_feature_size + n + 1, n])
 694        E_n_datas = List()
 695        E_n_indices = List()
 696        E_n_indptrs = List()
 697        E_n_shape = np.array([res_feature_size + n + 1, res_feature_size + n + 1])
 698        reg_comp_datas = List()
 699        reg_comp_indices = List()
 700        reg_comp_indptrs = List()
 701        reg_comp_shape = np.array([res_feature_size + n + 1, n])
 702        W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape = construct_jac_mat_csc_csc(
 703            Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, rsvr_size, n,
 704            squarenodes)
 705        leakage_data, leakage_indices, leakage_indptr, leakage_shape = construct_leakage_mat(rsvr_size, n, leakage,
 706                                                                                             squarenodes)
 707        reg_sum_avg_runtime = 0.
 708        E_n_avg_runtime = 0.
 709        reg_mult_avg_runtime = 0.
 710        D_n_avg_runtime = 0.
 711
 712        for i in range(k):
 713            D_n_data, D_n_idx, D_n_indptr = get_D_n(D[:, i], X[:, i], Win_nobias_data, Win_nobias_indices,
 714                                                    Win_nobias_indptr, Win_nobias_shape, D_n_shape, rsvr_size,
 715                                                    res_feature_size, n, squarenodes)
 716            D_n_datas.append(np.ascontiguousarray(D_n_data))
 717            D_n_indices.append(np.ascontiguousarray(D_n_idx))
 718            D_n_indptrs.append(np.ascontiguousarray(D_n_indptr))
 719        if k > 1:
 720            for i in range(1, k):
 721                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i], E_n_shape, rsvr_size, W_mat_data,
 722                                                        W_mat_indices, W_mat_indptr, W_mat_shape, leakage_data,
 723                                                        leakage_indices,
 724                                                        leakage_indptr, leakage_shape, squarenodes)
 725                E_n_datas.append(np.ascontiguousarray(E_n_data))
 726                E_n_indices.append(np.ascontiguousarray(E_n_idx))
 727                E_n_indptrs.append(np.ascontiguousarray(E_n_indptr))
 728
 729        for i in range(k - 1):
 730            reg_comp_data, reg_comp_idx, reg_comp_indptr = \
 731                np.copy(D_n_datas[i]), np.copy(D_n_indices[i]), np.copy(D_n_indptrs[i])
 732            if k > 1:
 733                for j in range(i, k - 1):
 734                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 735                        E_n_datas[j], E_n_indices[j], E_n_indptrs[j], E_n_shape,
 736                        reg_comp_data, reg_comp_idx, reg_comp_indptr, reg_comp_shape)
 737            reg_comp_datas.append(np.ascontiguousarray(reg_comp_data))
 738            reg_comp_indices.append(np.ascontiguousarray(reg_comp_idx))
 739            reg_comp_indptrs.append(np.ascontiguousarray(reg_comp_indptr))
 740        reg_comp_datas.append(np.ascontiguousarray(D_n_datas[-1]))
 741        reg_comp_indices.append(np.ascontiguousarray(D_n_indices[-1]))
 742        reg_comp_indptrs.append(np.ascontiguousarray(D_n_indptrs[-1]))
 743        sparsity = np.array([reg_comp_datas[j].size / (reg_comp_shape[0] * reg_comp_shape[1]) for j in range(k)])
 744
 745        for i in range(k, X.shape[1]):
 746            for j in range(k):
 747                if sparsity[j] < sparse_cutoff:
 748                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 749                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
 750                else:
 751                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
 752                                                                    reg_comp_indptrs[j], reg_comp_shape)
 753            assign_grad_reg = (i == reg_train_times)
 754            if np.any(assign_grad_reg):
 755                print(assign_grad_reg)
 756                print(gradient_reg[assign_grad_reg].shape)
 757                print((gradient_reg_base * reg_train_fracs[assign_grad_reg]).shape)
 758                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 759            if assign_grad_reg[-1]:
 760                break_flag = True
 761                break
 762            if k > 1:
 763                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i],
 764                                                        E_n_shape, rsvr_size, W_mat_data, W_mat_indices, W_mat_indptr,
 765                                                        W_mat_shape,
 766                                                        leakage_data, leakage_indices, leakage_indptr, leakage_shape,
 767                                                        squarenodes)
 768                E_n_datas[k - 2] = np.ascontiguousarray(E_n_data)
 769                E_n_indices[k - 2] = np.ascontiguousarray(E_n_idx)
 770                E_n_indptrs[k - 2] = np.ascontiguousarray(E_n_indptr)
 771            for j in range(k - 1):
 772                if k > 1:
 773                    if sparsity[j + 1] < sparse_cutoff:
 774                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_conv_mult(
 775                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 776                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 777                    else:
 778                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 779                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 780                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 781                    reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j] = \
 782                        np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 783                        np.ascontiguousarray(reg_comp_indptr)
 784            reg_comp_data, reg_comp_idx, reg_comp_indptr = get_D_n(D[:, i], X[:, i],
 785                                                                   Win_nobias_data, Win_nobias_indices,
 786                                                                   Win_nobias_indptr, Win_nobias_shape, D_n_shape,
 787                                                                   rsvr_size, res_feature_size, n, squarenodes)
 788            reg_comp_datas[k - 1], reg_comp_indices[k - 1], reg_comp_indptrs[k - 1] = \
 789                np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 790                np.ascontiguousarray(reg_comp_indptr)
 791        if not break_flag:
 792            for j in range(k):
 793                if sparsity[j] < sparse_cutoff:
 794                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 795                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
 796                else:
 797                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
 798                                                                    reg_comp_indptrs[j], reg_comp_shape)
 799            assign_grad_reg = i + 1 == reg_train_times
 800            if np.any(assign_grad_reg):
 801                print(assign_grad_reg)
 802                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 803        data_trstates = Y_train @ X_train.T
 804        states_trstates[0] = X_train @ X_train.T
 805        for i in range(1, reg_train_times.size):
 806            states_trstates[i] = np.ascontiguousarray(states_trstates[0])
 807        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 808    elif 'gradientk' in traintype and 'only' in traintype:
 809        # Linearized k-step noise
 810        k = str_to_int(traintype.replace('gradientk', '').replace('only', ''))
 811        reg_train_fracs = 1.0 / (reg_train_times - (k - 1))
 812        sparse_cutoff = 0.89
 813        break_flag = False
 814        X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
 815                             W_indptr,
 816                             W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 817        X = X[:, skip:(res_d - 2)]
 818        D = D[:, skip:(res_d - 2)]
 819        X_train = np.concatenate(
 820            (np.ones((1, d - (skip + 1))), X, u_arr_train[:, skip - 1:-2]), axis=0)
 821        X_train = get_squared(X_train, rsvr_size, squarenodes)
 822        gradient_reg_base = np.zeros((res_feature_size + n + 1, res_feature_size + n + 1))
 823        Win_nobias_data, Win_nobias_indices, Win_nobias_indptr, Win_nobias_shape = \
 824            get_Win_nobias(Win_data, Win_indices, Win_indptr, Win_shape)
 825        D_n_datas = List()
 826        D_n_indices = List()
 827        D_n_indptrs = List()
 828        D_n_shape = np.array([res_feature_size + n + 1, n])
 829        E_n_datas = List()
 830        E_n_indices = List()
 831        E_n_indptrs = List()
 832        E_n_shape = np.array([res_feature_size + n + 1, res_feature_size + n + 1])
 833        reg_comp_datas = List()
 834        reg_comp_indices = List()
 835        reg_comp_indptrs = List()
 836        reg_comp_shape = np.array([res_feature_size + n + 1, n])
 837        W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape = construct_jac_mat_csc_csc(
 838            Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, rsvr_size, n,
 839            squarenodes)
 840        leakage_data, leakage_indices, leakage_indptr, leakage_shape = construct_leakage_mat(rsvr_size, n, leakage,
 841                                                                                             squarenodes)
 842        reg_sum_avg_runtime = 0.
 843        E_n_avg_runtime = 0.
 844        reg_mult_avg_runtime = 0.
 845        D_n_avg_runtime = 0.
 846
 847        for i in range(k):
 848            D_n_data, D_n_idx, D_n_indptr = get_D_n(D[:, i], X[:, i], Win_nobias_data, Win_nobias_indices,
 849                                                    Win_nobias_indptr, Win_nobias_shape, D_n_shape, rsvr_size,
 850                                                    res_feature_size, n, squarenodes)
 851            D_n_datas.append(np.ascontiguousarray(D_n_data))
 852            D_n_indices.append(np.ascontiguousarray(D_n_idx))
 853            D_n_indptrs.append(np.ascontiguousarray(D_n_indptr))
 854        if k > 1:
 855            for i in range(1, k):
 856                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i], E_n_shape, rsvr_size, W_mat_data,
 857                                                        W_mat_indices, W_mat_indptr, W_mat_shape, leakage_data,
 858                                                        leakage_indices,
 859                                                        leakage_indptr, leakage_shape, squarenodes)
 860                E_n_datas.append(np.ascontiguousarray(E_n_data))
 861                E_n_indices.append(np.ascontiguousarray(E_n_idx))
 862                E_n_indptrs.append(np.ascontiguousarray(E_n_indptr))
 863
 864        for i in range(k - 1):
 865            reg_comp_data, reg_comp_idx, reg_comp_indptr = \
 866                np.copy(D_n_datas[i]), np.copy(D_n_indices[i]), np.copy(D_n_indptrs[i])
 867            if k > 1:
 868                for j in range(i, k - 1):
 869                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 870                        E_n_datas[j], E_n_indices[j], E_n_indptrs[j], E_n_shape,
 871                        reg_comp_data, reg_comp_idx, reg_comp_indptr, reg_comp_shape)
 872            reg_comp_datas.append(np.ascontiguousarray(reg_comp_data))
 873            reg_comp_indices.append(np.ascontiguousarray(reg_comp_idx))
 874            reg_comp_indptrs.append(np.ascontiguousarray(reg_comp_indptr))
 875        reg_comp_datas.append(np.ascontiguousarray(D_n_datas[-1]))
 876        reg_comp_indices.append(np.ascontiguousarray(D_n_indices[-1]))
 877        reg_comp_indptrs.append(np.ascontiguousarray(D_n_indptrs[-1]))
 878        sparsity = np.array([reg_comp_datas[j].size / (reg_comp_shape[0] * reg_comp_shape[1]) for j in range(k)])
 879
 880        for i in range(k, X.shape[1]):
 881            if sparsity[j] < sparse_cutoff:
 882                gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 883                    reg_comp_datas[0], reg_comp_indices[0], reg_comp_indptrs[0], reg_comp_shape)
 884            else:
 885                gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[0], reg_comp_indices[0],
 886                                                                reg_comp_indptrs[0], reg_comp_shape)
 887            assign_grad_reg = (i == reg_train_times)
 888            if np.any(assign_grad_reg):
 889                print(assign_grad_reg)
 890                print(gradient_reg[assign_grad_reg].shape)
 891                print((gradient_reg_base * reg_train_fracs[assign_grad_reg]).shape)
 892                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 893            if assign_grad_reg[-1]:
 894                break_flag = True
 895                break
 896            if k > 1:
 897                E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i],
 898                                                        E_n_shape, rsvr_size, W_mat_data, W_mat_indices, W_mat_indptr,
 899                                                        W_mat_shape,
 900                                                        leakage_data, leakage_indices, leakage_indptr, leakage_shape,
 901                                                        squarenodes)
 902                E_n_datas[k - 2] = np.ascontiguousarray(E_n_data)
 903                E_n_indices[k - 2] = np.ascontiguousarray(E_n_idx)
 904                E_n_indptrs[k - 2] = np.ascontiguousarray(E_n_indptr)
 905            for j in range(k - 1):
 906                if k > 1:
 907                    if sparsity[j + 1] < sparse_cutoff:
 908                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_conv_mult(
 909                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 910                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 911                    else:
 912                        reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
 913                            E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
 914                            reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
 915                    reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j] = \
 916                        np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 917                        np.ascontiguousarray(reg_comp_indptr)
 918            reg_comp_data, reg_comp_idx, reg_comp_indptr = get_D_n(D[:, i], X[:, i],
 919                                                                   Win_nobias_data, Win_nobias_indices,
 920                                                                   Win_nobias_indptr, Win_nobias_shape, D_n_shape,
 921                                                                   rsvr_size, res_feature_size, n, squarenodes)
 922            reg_comp_datas[k - 1], reg_comp_indices[k - 1], reg_comp_indptrs[k - 1] = \
 923                np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
 924                np.ascontiguousarray(reg_comp_indptr)
 925        if not break_flag:
 926            if sparsity[j] < sparse_cutoff:
 927                gradient_reg_base += matrix_sparse_sparseT_conv_mult(
 928                    reg_comp_datas[0], reg_comp_indices[0], reg_comp_indptrs[0], reg_comp_shape)
 929            else:
 930                gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[0], reg_comp_indices[0],
 931                                                                reg_comp_indptrs[0], reg_comp_shape)
 932            assign_grad_reg = i + 1 == reg_train_times
 933            if np.any(assign_grad_reg):
 934                print(assign_grad_reg)
 935                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
 936        data_trstates = Y_train @ X_train.T
 937        states_trstates[0] = X_train @ X_train.T
 938        for i in range(1, reg_train_times.size):
 939            states_trstates[i] = np.ascontiguousarray(states_trstates[0])
 940        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
 941        """
 942    elif traintype == 'sylvester':
 943        # Sylvester regularization w/o derivative
 944        X, p = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
 945                           W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 946        X = np.ascontiguousarray(X[:, skip:(res_d - 2)])
 947        X_train = np.ascontiguousarray(np.concatenate(
 948            (np.ones((1, d-(skip+1))), X, u_arr_train[:, skip-1:-2]), axis=0))
 949        data_trstates = Y_train @ X_train.T
 950        data_trstates[:, 1:rsvr_size+1] += noise_scaling**2/noise_realizations * \
 951            matrix_dot_left_T(W_mat_data, W_mat_indices,
 952                              W_mat_indptr, W_mat_shape, Win[:, 1:])
 953        states_trstates[0] = X_train @ X_train.T
 954        left_mat_base = -noise_scaling**2/noise_realizations * \
 955            (Win[:, 1:].T @ Win[:, 1:])
 956        left_mat = left_mat_base.reshape(1, left_mat_base.shape[0], left_mat_base.shape[1])
 957        return data_trstates, states_trstates, Y_train, X_train, left_mat
 958        
 959    elif traintype == 'sylvester_wD':
 960        # Sylvester regularization with derivative
 961        X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
 962                           W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 963        X = X[:, skip:(res_d - 2)]
 964        D = D[:, skip:(res_d - 2)]
 965        X_train = np.concatenate(
 966            (np.ones((1, d-(skip+1))), X, u_arr_train[:, skip-1:-2]), axis=0)
 967        Dmean = mean_numba_axis1(D)
 968        temp_mat = np.diag(Dmean) @ Win[:, 1:]
 969        target_correction = matrix_dot_left_T(
 970            W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape, temp_mat)
 971        left_mat_base = temp_mat.T @ temp_mat
 972        target_correction = noise_scaling**2/noise_realizations * target_correction
 973        left_mat_base = -noise_scaling**2/noise_realizations * left_mat_base
 974        left_mat = left_mat_base.reshape(1, left_mat_base.shape[0], left_mat_base.shape[1])
 975        data_trstates = Y_train @ X_train.T
 976        data_trstates[:, 1:rsvr_size+1] += target_correction
 977        states_trstates[0] = X_train @ X_train.T
 978        return data_trstates, states_trstates, Y_train, X_train, left_mat
 979        """
 980    elif 'regzero' in traintype:
 981        # Linearized k-step noise
 982        k = str_to_int(traintype.replace('regzerok', ''))
 983        reg_train_fracs = 1.0 / (reg_train_times - (k - 1))
 984        sparse_cutoff = 0.89
 985        break_flag = False
 986        X, tmp = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
 987                               W_indptr,
 988                               W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
 989        X = X[:, skip:(res_d - 2)]
 990        X_train = np.concatenate(
 991            (np.ones((1, d - (skip + 1))), X, u_arr_train[:, skip - 1:-2]), axis=0)
 992        X_train = get_squared(X_train, rsvr_size, squarenodes)
 993        data_trstates = Y_train @ X_train.T
 994        states_trstates[0] = X_train @ X_train.T
 995        for i in range(1, reg_train_times.size):
 996            states_trstates[i] = np.ascontiguousarray(states_trstates[0])
 997        X_zero, D_zero = get_X_wrapped(np.zeros(u_arr_train.shape), res_X, Win_data, Win_indices, Win_indptr, Win_shape,
 998                                       W_data, W_indices, W_indptr,
 999                                       W_shape, leakage, noise[0], noisetype, noise_scaling, 1, traintype)
1000        X = X_zero[:, skip:(res_d - 2)]
1001        D = D_zero[:, skip:(res_d - 2)]
1002        gradient_reg_base = np.zeros((res_feature_size + n + 1, res_feature_size + n + 1))
1003        Win_nobias_data, Win_nobias_indices, Win_nobias_indptr, Win_nobias_shape = \
1004            get_Win_nobias(Win_data, Win_indices, Win_indptr, Win_shape)
1005        D_n_datas = List()
1006        D_n_indices = List()
1007        D_n_indptrs = List()
1008        D_n_shape = np.array([res_feature_size + n + 1, n])
1009        E_n_datas = List()
1010        E_n_indices = List()
1011        E_n_indptrs = List()
1012        E_n_shape = np.array([res_feature_size + n + 1, res_feature_size + n + 1])
1013        reg_comp_datas = List()
1014        reg_comp_indices = List()
1015        reg_comp_indptrs = List()
1016        reg_comp_shape = np.array([res_feature_size + n + 1, n])
1017        W_mat_data, W_mat_indices, W_mat_indptr, W_mat_shape = construct_jac_mat_csc_csc(
1018            Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, rsvr_size, n,
1019            squarenodes)
1020        leakage_data, leakage_indices, leakage_indptr, leakage_shape = construct_leakage_mat(rsvr_size, n, leakage,
1021                                                                                             squarenodes)
1022        reg_sum_avg_runtime = 0.
1023        E_n_avg_runtime = 0.
1024        reg_mult_avg_runtime = 0.
1025        D_n_avg_runtime = 0.
1026
1027        for i in range(k):
1028            D_n_data, D_n_idx, D_n_indptr = get_D_n(D[:, i], X[:, i], Win_nobias_data, Win_nobias_indices,
1029                                                    Win_nobias_indptr, Win_nobias_shape, D_n_shape, rsvr_size,
1030                                                    res_feature_size, n, squarenodes)
1031            D_n_datas.append(np.ascontiguousarray(D_n_data))
1032            D_n_indices.append(np.ascontiguousarray(D_n_idx))
1033            D_n_indptrs.append(np.ascontiguousarray(D_n_indptr))
1034        for i in range(1, k):
1035            E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i], E_n_shape, rsvr_size, W_mat_data,
1036                                                    W_mat_indices, W_mat_indptr, W_mat_shape, leakage_data,
1037                                                    leakage_indices,
1038                                                    leakage_indptr, leakage_shape, squarenodes)
1039            E_n_datas.append(np.ascontiguousarray(E_n_data))
1040            E_n_indices.append(np.ascontiguousarray(E_n_idx))
1041            E_n_indptrs.append(np.ascontiguousarray(E_n_indptr))
1042
1043        for i in range(k - 1):
1044            reg_comp_data, reg_comp_idx, reg_comp_indptr = \
1045                np.copy(D_n_datas[i]), np.copy(D_n_indices[i]), np.copy(D_n_indptrs[i])
1046            for j in range(i, k - 1):
1047                reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
1048                    E_n_datas[j], E_n_indices[j], E_n_indptrs[j], E_n_shape,
1049                    reg_comp_data, reg_comp_idx, reg_comp_indptr, reg_comp_shape)
1050            reg_comp_datas.append(np.ascontiguousarray(reg_comp_data))
1051            reg_comp_indices.append(np.ascontiguousarray(reg_comp_idx))
1052            reg_comp_indptrs.append(np.ascontiguousarray(reg_comp_indptr))
1053        reg_comp_datas.append(np.ascontiguousarray(D_n_datas[-1]))
1054        reg_comp_indices.append(np.ascontiguousarray(D_n_indices[-1]))
1055        reg_comp_indptrs.append(np.ascontiguousarray(D_n_indptrs[-1]))
1056        sparsity = np.array([reg_comp_datas[j].size / (reg_comp_shape[0] * reg_comp_shape[1]) for j in range(k)])
1057
1058        for i in range(k, X.shape[1]):
1059            for j in range(k):
1060                if sparsity[j] < sparse_cutoff:
1061                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
1062                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
1063                else:
1064                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
1065                                                                    reg_comp_indptrs[j], reg_comp_shape)
1066            assign_grad_reg = (i == reg_train_times)
1067            if np.any(assign_grad_reg):
1068                print(assign_grad_reg)
1069                print(gradient_reg[assign_grad_reg].shape)
1070                print((gradient_reg_base * reg_train_fracs[assign_grad_reg]).shape)
1071                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
1072            if assign_grad_reg[-1]:
1073                break_flag = True
1074                break
1075            E_n_data, E_n_idx, E_n_indptr = get_E_n(D[:, i], X[:, i],
1076                                                    E_n_shape, rsvr_size, W_mat_data, W_mat_indices, W_mat_indptr,
1077                                                    W_mat_shape,
1078                                                    leakage_data, leakage_indices, leakage_indptr, leakage_shape,
1079                                                    squarenodes)
1080            E_n_datas[k - 2] = np.ascontiguousarray(E_n_data)
1081            E_n_indices[k - 2] = np.ascontiguousarray(E_n_idx)
1082            E_n_indptrs[k - 2] = np.ascontiguousarray(E_n_indptr)
1083
1084            for j in range(k - 1):
1085                if sparsity[j + 1] < sparse_cutoff:
1086                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_conv_mult(
1087                        E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
1088                        reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
1089                else:
1090                    reg_comp_data, reg_comp_idx, reg_comp_indptr, tmp = matrix_sparse_sparse_mult(
1091                        E_n_datas[k - 2], E_n_indices[k - 2], E_n_indptrs[k - 2], E_n_shape,
1092                        reg_comp_datas[j + 1], reg_comp_indices[j + 1], reg_comp_indptrs[j + 1], reg_comp_shape)
1093                reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j] = \
1094                    np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
1095                    np.ascontiguousarray(reg_comp_indptr)
1096            reg_comp_data, reg_comp_idx, reg_comp_indptr = get_D_n(D[:, i], X[:, i],
1097                                                                   Win_nobias_data, Win_nobias_indices,
1098                                                                   Win_nobias_indptr, Win_nobias_shape, D_n_shape,
1099                                                                   rsvr_size, res_feature_size, n, squarenodes)
1100            reg_comp_datas[k - 1], reg_comp_indices[k - 1], reg_comp_indptrs[k - 1] = \
1101                np.ascontiguousarray(reg_comp_data), np.ascontiguousarray(reg_comp_idx), \
1102                np.ascontiguousarray(reg_comp_indptr)
1103        if not break_flag:
1104            for j in range(k):
1105                if sparsity[j] < sparse_cutoff:
1106                    # gradient_reg += reg_components[:, :, j] @ reg_components[:, :, j].T
1107                    gradient_reg_base += matrix_sparse_sparseT_conv_mult(
1108                        reg_comp_datas[j], reg_comp_indices[j], reg_comp_indptrs[j], reg_comp_shape)
1109                else:
1110                    gradient_reg_base += matrix_sparse_sparseT_mult(reg_comp_datas[j], reg_comp_indices[j],
1111                                                                    reg_comp_indptrs[j], reg_comp_shape)
1112            assign_grad_reg = i + 1 == reg_train_times
1113            if np.any(assign_grad_reg):
1114                print(assign_grad_reg)
1115                gradient_reg[assign_grad_reg] = gradient_reg_base * reg_train_fracs[assign_grad_reg]
1116        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
1117    else:
1118        # Noiseless training
1119        data_trstates = np.zeros((n, res_feature_size + 1 + n), dtype=np.float64)
1120        states_trstates[0] = np.zeros(
1121            (n + res_feature_size + 1, n + res_feature_size + 1), dtype=np.float64)
1122        X_train = np.zeros((n + res_feature_size + 1, d - (skip + 1)))
1123        return data_trstates, states_trstates, Y_train, X_train, gradient_reg
@jit(nopython=True, fastmath=True)
def getD(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape,
         leakage, noise, skip, noisetype='none',
         noise_scaling=0, noise_realizations=1, traintype='normal', squarenodes=False):
    """Return the reservoir derivative matrix for the training window.

    Runs the reservoir once with traintype 'gradient' (which makes
    get_X_wrapped return the state derivatives D alongside the states X)
    and returns D with the first `skip` synchronization columns and the
    final two columns discarded, wrapped in a list.
    """
    # Only the number of stored reservoir states is needed for slicing.
    res_d = res_X.shape[1]
    # X is returned by get_X_wrapped but not used by callers of getD.
    X, D = get_X_wrapped(u_arr_train, res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices,
                         W_indptr, W_shape, leakage, noise, 'none', 0, 0, 'gradient', squarenodes)
    return [D[:, skip:(res_d - 2)]]
def predict(res, u0, steps=1000, squarenodes=False):
    """Unpack the reservoir object and delegate to the numba-compiled predictor."""
    return predictwrapped(res.X, res.Win_data, res.Win_indices, res.Win_indptr, res.Win_shape,
                          res.W_data, res.W_indices, res.W_indptr, res.W_shape,
                          res.Wout, res.leakage, u0, steps, squarenodes)
@jit(nopython=True, fastmath=True)
def predictwrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, Wout,
                   leakage, u0, steps, squarenodes=False):
    """Numba-compatible closed-loop prediction.

    Starting from input u0 and the stored reservoir state, iterates the
    reservoir autonomously for `steps` steps, feeding each readout back
    in as the next input. Returns the (num_inputs, steps + 1) output
    trajectory, including the initial condition in column 0.
    """
    num_inputs = Win_shape[1] - 1
    rsvr_size = res_X.shape[0]
    Y = np.empty((num_inputs, steps + 1))
    X = np.empty((rsvr_size, steps + 1))

    Y[:, 0] = u0
    X[:, 0] = res_X[:, -2]  # second-to-last stored (synchronized) reservoir state

    for t in range(steps):
        # Leaky-tanh reservoir update driven by the previous prediction
        # (with a constant 1.0 bias prepended to the input).
        input_contrib = mult_vec(Win_data, Win_indices, Win_indptr, Win_shape, np.append(1., Y[:, t]))
        recurrent_contrib = mult_vec(W_data, W_indices, W_indptr, W_shape, X[:, t])
        X[:, t + 1] = (1 - leakage) * X[:, t] + leakage * np.tanh(input_contrib + recurrent_contrib)
        # Linear readout from the [bias; state; input] feature vector,
        # optionally augmented with squared node values.
        features = get_squared_vec(np.concatenate((np.array([1.]), X[:, t + 1], Y[:, t])),
                                   rsvr_size, squarenodes)
        Y[:, t + 1] = Wout @ features

    return Y
def get_test_data(run_opts, test_stream, overall_idx, rkTime, split):
    """Build the true-system trajectories used to validate reservoir performance.

    Draws one initial condition per test from the per-test random number
    generators in test_stream, integrates the numerical model, and returns
    the stacked train/test trajectories plus the model parameters.
    """
    transient = 2000
    total_T = rkTime + transient + split

    # Initial condition for the first test set; this first integration also
    # fixes the model parameters reused for all subsequent test sets.
    if run_opts.system == 'lorenz':
        ic = test_stream[0].random(3) * 2 - 1
        u0 = np.array([ic[0], ic[1], 30 * ic[2]])
    elif run_opts.system in ['KS', 'KS_d2175']:
        u0 = (test_stream[0].random(64) * 2 - 1) * 0.6
        u0 = u0 - np.mean(u0)

    u_arr_train_nonoise, u_arr_test, p, params = numerical_model_wrapped(
        tau=run_opts.tau, T=total_T, ttsplit=split + transient, u0=u0, system=run_opts.system)
    u_arr_train_nonoise = u_arr_train_nonoise[:, transient:]

    rktest_u_arr_train_nonoise = np.zeros(
        (u_arr_train_nonoise.shape[0], u_arr_train_nonoise.shape[1], run_opts.num_tests))
    rktest_u_arr_test = np.zeros(
        (u_arr_test.shape[0], u_arr_test.shape[1], run_opts.num_tests))
    rktest_u_arr_train_nonoise[:, :, 0] = u_arr_train_nonoise
    rktest_u_arr_test[:, :, 0] = u_arr_test

    for i in range(1, run_opts.num_tests):
        # Fresh initial condition per test, same model parameters.
        if run_opts.system == 'lorenz':
            ic = test_stream[i].random(3) * 2 - 1
            u0 = np.array([ic[0], ic[1], 30 * ic[2]])
        elif run_opts.system in ['KS', 'KS_d2175']:
            u0 = (test_stream[i].random(64) * 2 - 1) * 0.6
            u0 = u0 - np.mean(u0)
        u_arr_train_nonoise, rktest_u_arr_test[:, :, i], p, params = numerical_model_wrapped(
            tau=run_opts.tau, T=total_T, ttsplit=split + transient, u0=u0,
            system=run_opts.system, params=params)
        rktest_u_arr_train_nonoise[:, :, i] = u_arr_train_nonoise[:, transient:]

    # Optionally persist the ground-truth test trajectories (first index only).
    if run_opts.save_truth and overall_idx == 0:
        for (i, test) in enumerate(np.arange(run_opts.test_start, run_opts.test_start + run_opts.num_tests)):
            np.savetxt(os.path.join(run_opts.run_folder_name,
                                    '%s_tau%0.2f_true_test_%d.csv' % (run_opts.system, run_opts.tau, test)),
                       rktest_u_arr_test[:, :, i], delimiter=',')

    return rktest_u_arr_train_nonoise, rktest_u_arr_test, params
def test(run_opts, res, Wout_itr, noise_in, rktest_u_arr_train_nonoise, rktest_u_arr_test, true_pmap_max, rkTime=1000,
         split=3000, params=np.array([[], []], dtype=np.complex128), showMapError=False, showTrajectories=False,
         showHist=False):
    """Wrapper for the numba-compiled test routine.

    Unpacks the reservoir object and run options, runs test_wrapped with the
    selected output matrix, and converts the raw stable count into a fraction.
    """
    results = test_wrapped(
        res.X, res.Win_data, res.Win_indices, res.Win_indptr, res.Win_shape, res.W_data, res.W_indices, res.W_indptr,
        res.W_shape, res.Wout[Wout_itr], res.leakage, rktest_u_arr_train_nonoise, rktest_u_arr_test, true_pmap_max,
        run_opts.num_tests, rkTime, split, noise_in, run_opts.system, params=params, pmap=run_opts.pmap,
        max_valid_time=run_opts.max_valid_time, squarenodes=run_opts.squarenodes, savepred=run_opts.savepred,
        save_time_rms=run_opts.save_time_rms)
    (stable_count, mean_rms, max_rms, variances, valid_time,
     rms, preds, wass_dist, pmap_max, pmap_max_wass_dist) = results

    return (stable_count / run_opts.num_tests, mean_rms, max_rms, variances, valid_time,
            rms, preds, wass_dist, pmap_max, pmap_max_wass_dist)
@jit(nopython=True, fastmath=True)
def test_wrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, Wout,
                 leakage, rktest_u_arr_train_nonoise, rktest_u_arr_test, true_pmap_max, num_tests, rkTime, split,
                 noise_in, system='lorenz', tau=0.1, params=np.array([[], []], dtype=np.complex128), pmap=False,
                 max_valid_time=500, squarenodes=False, savepred=False, save_time_rms=False):
    # Numba compatable function for testing trained reservoir performance against true system time series.
    # For each of num_tests test sets: synchronize the reservoir on the true
    # trajectory, predict in closed loop, then accumulate valid-time, RMS,
    # variance, and (optionally) Poincare-map metrics. Returns the raw
    # stable_count plus all per-test metric arrays.
    stable_count = 0
    # Number of independent valid-time trials that fit in the test segment.
    num_vt_tests = ((rkTime - split)) // max_valid_time
    valid_time = np.zeros((num_tests, num_vt_tests))
    max_rms = np.zeros(num_tests)
    mean_rms = np.zeros(num_tests)
    means = np.zeros(num_tests)
    variances = np.zeros(num_tests)
    # NOTE(review): wass_dist is allocated and returned but never assigned
    # below (the assignments are commented out), so it stays all zeros.
    wass_dist = np.zeros(num_tests)
    pmap_max = []
    pmap_max_wass_dist = np.zeros(num_tests)
    # Placeholder 1-element arrays keep numba typing consistent when the
    # optional outputs are disabled.
    if savepred:
        preds = np.zeros((num_tests, rktest_u_arr_test.shape[0], (rkTime - split) + 1))
    else:
        preds = np.empty((1, 1, 1), dtype=np.double)
    if save_time_rms:
        rms = np.zeros((num_tests, (rkTime - split)))
    else:
        rms = np.empty((1, 1), dtype=np.double)

    # print(num_tests)
    for i in range(num_tests):
        # objmode block: call the (non-numba) wall clock for timing.
        with objmode(test_tic='double'):
            test_tic = time.perf_counter()
        # NOTE(review): np.zeros(...) * 2 - 1 yields a constant array of -1s;
        # this looks like it was meant to be a random initialization
        # (random(...) * 2 - 1) -- confirm intent before changing.
        res_X = (np.zeros((res_X.shape[0], split + 2)) * 2 - 1)

        # sets res.X: synchronize the reservoir to the true training trajectory.
        res_X, p = get_X_wrapped(np.ascontiguousarray(
            rktest_u_arr_train_nonoise[:, :, i]), res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data,
            W_indices, W_indptr, W_shape, leakage, noise_in)
        # Closed-loop prediction over the full test segment.
        pred_full = predictwrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
                                   W_shape,
                                   Wout, leakage, u0=rktest_u_arr_test[:, 0, i], steps=(rkTime - split),
                                   squarenodes=squarenodes)
        if savepred:
            preds[i] = pred_full
        error = np.zeros(max_valid_time)
        if pmap:
            # Rescale predictions before computing Poincare-map maxima; the
            # constants are presumably the per-variable standardization
            # factors of each attractor -- TODO confirm against training code.
            if system == 'lorenz':
                calc_pred = np.stack((pred_full[0] * 7.929788629895004,
                                      pred_full[1] * 8.9932616136662,
                                      pred_full[2] * 8.575917849311919 + 23.596294463016896))
                # wass_dist[i] = wasserstein_distance_empirical(calc_pred.flatten(), true_trajectory.flatten())
                pred_pmap_max = poincare_max(calc_pred, np.arange(pred_full.shape[0]))
            elif system == 'KS':
                # wass_dist[i] = wasserstein_distance_empirical(pred.flatten()*1.1876770355823614, true_trajectory.flatten())
                pred_pmap_max = poincare_max(pred_full * 1.1876770355823614, np.arange(pred_full.shape[0]))
            elif system == 'KS_d2175':
                pred_pmap_max = poincare_max(pred_full * 1.2146066380280796, np.arange(pred_full.shape[0]))

            pmap_max.append(pred_pmap_max)
            # NOTE(review): this loop only ever uses j == 0, so the Wasserstein
            # distance is computed from dimension 0's map maxima alone --
            # confirm whether all dimensions were intended.
            for j in range(rktest_u_arr_test.shape[0]):
                if j == 0:
                    pred_pmap_max_all = pred_pmap_max[j]
            pmap_max_wass_dist[i] = wasserstein_distance_empirical(pred_pmap_max_all, true_pmap_max)
        else:
            pmap_max_wass_dist[i] = np.nan

        # Valid-time threshold: 20% of the system's presumed normalization scale.
        if system == 'KS':
            vt_cutoff = 0.2 * 1.3697994268693887
        else:
            vt_cutoff = 0.2 * np.sqrt(2)
        check_vt = True
        array_compute = False  # vectorized one-step map-error path below is disabled
        pred = pred_full[:, :max_valid_time]
        # Valid-time trials: measure how long each prediction stays under the
        # cutoff, then re-synchronize on the truth and predict the next window.
        for k in range(num_vt_tests):
            for j in range(1, pred.shape[1]):
                error[j] = np.sqrt(
                    np.mean((pred[:, j] - rktest_u_arr_test[:, k * max_valid_time + j, i]) ** 2.0))

                # valid_time keeps advancing only until the first cutoff crossing.
                if error[j] < vt_cutoff and check_vt:
                    valid_time[i, k] = j
                else:
                    # if check_vt:
                    check_vt = False
            #print('Valid Time')
            #print(valid_time[i, k])
            # Re-synchronize the reservoir on the true data for this window.
            res_X = np.zeros((res_X.shape[0], max_valid_time + 2))
            res_X, p = get_X_wrapped(np.ascontiguousarray(
                rktest_u_arr_test[:, k * max_valid_time:(k + 1) * max_valid_time + 1, i]), res_X, Win_data, Win_indices,
                Win_indptr, Win_shape, W_data, W_indices, W_indptr, W_shape, leakage, noise_in)
            if k < (num_vt_tests - 1):
                pred = predictwrapped(res_X, Win_data, Win_indices, Win_indptr, Win_shape, W_data, W_indices, W_indptr,
                                      W_shape,
                                      Wout, leakage, u0=rktest_u_arr_test[:, (k + 1) * max_valid_time, i],
                                      steps=max_valid_time - 1, squarenodes=squarenodes)
                check_vt = True
        # One-step map error: compare each predicted step against the true
        # model integrated one step from the previous prediction.
        if array_compute:
            # Vectorized variant (currently unreachable; array_compute is False).
            if system == 'lorenz':
                rkmap_u_arr_train = numerical_model_wrapped_pred(u0_array=np.stack((pred_full[0] * 7.929788629895004,
                                                                pred_full[1] * 8.9932616136662,
                                                                pred_full[2] * 8.575917849311919 + 23.596294463016896)),
                                                             h=0.01, system=system, params=params, tau=tau,
                                                             ttsplit=pred_full.shape[1])[0]
            elif system == 'KS':
                u0 = pred_full * 1.1876770355823614
                rkmap_u_arr_train = numerical_model_wrapped_pred(
                    u0_array=u0, h=tau, T=1, system=system, params=params, ttsplit=pred_full.shape[1])[0]
            elif system == 'KS_d2175':
                u0 = pred_full * 1.2146066380280796
                rkmap_u_arr_train = numerical_model_wrapped_pred(
                    u0_array=u0, h=tau, T=1, system=system, params=params, ttsplit=pred_full.shape[1])[0]
            # print(rkmap_u_arr_train[0,:10])
            x2y2z2 = sum_numba_axis0(
                (pred_full[:, 1:] - rkmap_u_arr_train[:, :-1]) ** 2.0)
        else:
            # Step-by-step variant: integrate the true model one step from each
            # predicted state and accumulate the squared deviation.
            x2y2z2 = np.zeros(pred_full[0].size - 1)
            for j in range(1, pred_full[0].size):

                if system == 'lorenz':
                    rkmap_u_arr_train = \
                        numerical_model_wrapped(u0 = np.array([pred_full[0][j - 1] * 7.929788629895004,
                                            pred_full[1][j - 1] * 8.9932616136662,
                                            pred_full[2][j - 1] * 8.575917849311919 + 23.596294463016896]),
                                            h=0.01, T=1, tau=tau, system=system, params=params)[0]
                elif system == 'KS':
                    u0 = pred_full[:, j - 1] * (1.1876770355823614)
                    rkmap_u_arr_train = numerical_model_wrapped(h=tau, T=1, u0=u0, system=system, params=params)[0]
                elif system == 'KS_d2175':
                    u0 = pred_full[:, j - 1] * (1.2146066380280796)
                    rkmap_u_arr_train = numerical_model_wrapped(h=tau, T=1, u0=u0, system=system, params=params)[0]

                x2y2z2[j - 1] = np.sum((pred_full[:, j] - rkmap_u_arr_train[:, 1]) ** 2)
        rms_test = np.sqrt(x2y2z2 / pred_full.shape[0])
        if save_time_rms:
            rms[i] = rms_test
        max_rms[i] = np.max(rms_test)
        mean_rms[i] = np.mean(rms_test)
        if system == 'lorenz':
            means[i] = np.mean(pred_full[0])
            variances[i] = np.var(pred_full[0])
        elif system in ['KS', 'KS_d2175']:
            means[i] = np.mean(pred_full.flatten())
            variances[i] = np.var(pred_full.flatten())
        # Stability criterion: small one-step map error and near-unit variance.
        if mean_rms[i] < 5e-3 and 0.9 < variances[i] and variances[i] < 1.1:
            stable_count += 1
        with objmode(test_toc='double'):
            test_toc = time.perf_counter()
        # NOTE(review): test_time is computed but never used or returned.
        test_time = test_toc - test_tic

    return stable_count, mean_rms, max_rms, variances, valid_time, rms, preds, wass_dist, pmap_max, pmap_max_wass_dist
def generate_res(run_opts, res_gen, res_itr, rk, noise_stream, noise_scaling=0):
    """Construct a reservoir for one training run and drive it with the training data.

    Builds a Reservoir sized to the training time series in ``rk``, draws the
    noise realizations that will perturb the training input, and runs the
    reservoir forward over the (noisy) training data to populate its state
    matrices.

    Returns:
        (reservoir, noise_in): the driven Reservoir object and the generated
        noise array used to perturb the training input.
    """
    train_data_shape = rk.u_arr_train.shape
    reservoir = Reservoir(run_opts, res_gen, res_itr, train_data_shape[0])
    noise_in = gen_noise_driver(run_opts, train_data_shape, reservoir.X.shape,
                                noise_scaling, noise_stream)
    get_states(run_opts, reservoir, rk, noise_in, noise_scaling)
    return reservoir, noise_in
def optim_func(run_opts, res_out, res, noise_in, noise, noise_idx, rktest_u_arr_train_nonoise, rktest_u_arr_test, alpha,
               alpha_idx, true_pmap_max, rkTime=400, split=2000, params=np.array([[], []], dtype=np.complex128)):
    """Train output weights for one regularization value and test the resulting reservoir.

    Solves the (regularized) linear regression for ``res.Wout`` at each of the
    ``num_reg_train_times`` training lengths, records training RMS into
    ``res_out``, optionally records eigenvalues of the gradient regularization
    matrix, then runs the full prediction test and stores all test metrics into
    ``res_out`` at ``[noise_idx, alpha_idx, i]``.

    Results are communicated entirely through mutation of ``res`` and
    ``res_out``; nothing is returned.
    """
    # Number of leading (largest) eigenvalues of the gradient regularization
    # matrix to save when run_opts.save_eigenvals is set.
    num_eigenvals = 500
    # Feature vector is doubled when squared node states are appended.
    if run_opts.squarenodes:
        res_feature_size = res.rsvr_size * 2
    else:
        res_feature_size = res.rsvr_size
    if run_opts.prior == 'zero':
        # Standard Tikhonov regularization toward zero weights.
        idenmat = np.identity(
            res_feature_size + 1 + rktest_u_arr_train_nonoise.shape[0]) * alpha
        prior = np.zeros(res.data_trstates.shape)
    elif run_opts.prior == 'input_pass':
        # Regularize toward an input pass-through: identity on the input block,
        # zeros on the bias + reservoir-feature block.
        idenmat = np.identity(
            res_feature_size + 1 + rktest_u_arr_train_nonoise.shape[0]) * alpha
        prior = np.concatenate((np.zeros((rktest_u_arr_train_nonoise.shape[0], 1 + res_feature_size)),
                                np.identity(rktest_u_arr_train_nonoise.shape[0])), axis=1) * alpha
    # NOTE(review): if run_opts.prior is neither 'zero' nor 'input_pass',
    # idenmat/prior are never bound and the solve below raises NameError —
    # confirm upstream validation restricts prior to these two values.

    num_reg_train_times = res.gradient_reg.shape[0]
    # One Wout slab per regularization training time:
    # shape (num_reg_train_times, n_inputs, n_inputs + 1 + res_feature_size).
    res.Wout = np.zeros((num_reg_train_times, rktest_u_arr_train_nonoise.shape[0],
                         rktest_u_arr_train_nonoise.shape[0] + 1 + res_feature_size))
    # Normalization by the number of training samples.
    trainlen_mult = 1.0 / res.Y_train.shape[1]
    for i in range(num_reg_train_times):
        if run_opts.traintype not in ['sylvester', 'sylvester_wD']:
            # Ridge-style solve: (S/T + noise*G + alpha*I)^T Wout^T = (D/T + prior)^T,
            # transposed so scipy.linalg.solve sees a standard A x = b system.
            res.Wout[i] = np.transpose(solve(np.transpose(
                res.states_trstates[i] * trainlen_mult + noise * res.gradient_reg[i] + idenmat),
                np.transpose(res.data_trstates * trainlen_mult + prior)))
        else:
            # Sylvester-type traintypes solve A X + X B = Q instead of a plain
            # linear system; uses the precomputed left matrix for this train time.
            res.Wout[i] = solve_sylvester(
                res.left_mat[i], res.states_trstates[i] + idenmat, res.data_trstates)
        # In-sample prediction error per time step, RMS over output dimensions.
        train_preds = res.Wout[i] @ res.X_train
        train_rms = np.sqrt(np.mean((train_preds - res.Y_train) ** 2.0, axis=0))
        # NOTE(review): these assignments broadcast a scalar over ALL noise and
        # alpha indices ([:, :, i]) rather than only [noise_idx, alpha_idx, i] —
        # every later call overwrites earlier values; confirm this is intended.
        res_out.train_mean_rms_out[:, :, i] = np.mean(train_rms)
        res_out.train_max_rms_out[:, :, i] = np.max(train_rms)
        if run_opts.save_eigenvals and alpha_idx == 0:
            # eigvalsh returns eigenvalues in ascending order; the reversed slice
            # (start clamps to the last index) extracts the num_eigenvals largest
            # eigenvalues in descending order. Saved only once per noise value
            # (alpha_idx == 0) since gradient_reg does not depend on alpha.
            eigenvals_out = np.linalg.eigvalsh(res.gradient_reg[i])
            res_out.grad_eigenvals[:, i] = eigenvals_out[eigenvals_out.size:eigenvals_out.size - num_eigenvals - 1: -1]
        # Run the full out-of-sample test and scatter the ten returned metrics
        # into res_out at this (noise, alpha, train-time) cell.
        res_out.stable_frac_out[noise_idx, alpha_idx, i], res_out.mean_rms_out[noise_idx, alpha_idx, i], \
        res_out.max_rms_out[noise_idx, alpha_idx, i], res_out.variances_out[noise_idx, alpha_idx, i], \
        res_out.valid_time_out[noise_idx, alpha_idx, i], res_out.rms_out[noise_idx, alpha_idx, i], \
        res_out.pred_out[noise_idx, alpha_idx, i], res_out.wass_dist_out[noise_idx, alpha_idx, i], \
        res_out.pmap_max_out[noise_idx, alpha_idx, i], res_out.pmap_max_wass_dist_out[noise_idx, alpha_idx, i] \
            = test(run_opts, res, i, noise_in, rktest_u_arr_train_nonoise, rktest_u_arr_test,
                   true_pmap_max, rkTime=rkTime, split=split, params=params)
def get_res_results(run_opts, res_itr, res_gen, rk, noise, noise_stream,
                    rktest_u_arr_train_nonoise, rktest_u_arr_test, rkTime_test, split_test, params,
                    train_seed, true_pmap_max):
    """Generate, train, and test one reservoir over all noise and regularization values.

    For each noise magnitude in ``noise`` (scalar or array) and each value in
    ``run_opts.reg_values``, trains the reservoir via optim_func and records the
    test metrics into a ResOutput.

    Returns:
        (res_out, train_seed, noise_array, res_itr) where noise_array is
        ``noise`` normalized to a 1-D numpy array.
    """
    tic = time.perf_counter()
    print('Starting res %d' % res_itr)
    # Build the reservoir and obtain its training-state matrices, driven by the
    # noise realizations drawn from noise_stream.
    reservoir, noise_in = generate_res(run_opts, res_gen, res_itr, rk, noise_stream, noise)

    toc = time.perf_counter()
    print('Res states found for itr %d, runtime: %f sec.' % (res_itr, toc - tic))

    # Normalize a scalar noise magnitude to a 1-element array so the loop below
    # handles both the scalar and array cases uniformly.
    if not isinstance(noise, np.ndarray):
        noise_array = np.array([noise])
    else:
        noise_array = noise
    res_out = ResOutput(run_opts, noise)
    for noise_idx, noise_val in enumerate(noise_array):
        noise_tic = time.perf_counter()

        def run_alpha(alpha, alpha_idx):
            # Train with regularization `alpha` and record metrics into res_out
            # at this noise index (optim_func mutates res_out in place).
            optim_func(run_opts, res_out, reservoir, noise_in[noise_idx], noise_val, noise_idx,
                       rktest_u_arr_train_nonoise, rktest_u_arr_test, alpha, alpha_idx,
                       true_pmap_max, rkTime_test, split_test, params)

        for j, alpha_value in enumerate(run_opts.reg_values):
            print('Regularization: ', run_opts.reg_values[j])
            if run_opts.debug_mode:
                # Debug mode: let failures propagate so the traceback is visible.
                run_alpha(alpha_value, j)
            else:
                # Best-effort mode: one failed regularization value must not
                # abort the remaining ones. Catch Exception rather than a bare
                # except so KeyboardInterrupt/SystemExit still propagate, and
                # report the error instead of silently discarding it.
                try:
                    run_alpha(alpha_value, j)
                except Exception as err:
                    print('Training unsuccessful for alpha:')
                    print(alpha_value)
                    print(err)
        noise_toc = time.perf_counter()
        print('Noise test time: %f sec.' % (noise_toc - noise_tic))
    toc = time.perf_counter()
    runtime = toc - tic
    print('Iteration runtime: %f sec.' % runtime)
    return res_out, train_seed, noise_array, res_itr
def find_stability(run_opts, noise, train_seed, train_gen, res_itr, res_gen, test_stream, test_idxs, noise_stream,
                   overall_idx):
    # Main function for training and testing reservoir performance. This function first generates the training and
    # testing data, then passes them to get_res_results to obtain the reservoir performance, and finally saves
    # the results to disk. Returns (rkTime_test, split_test) describing the test run length and sync split used.
    import warnings
    # Silence all warnings in this worker; this function may run inside a Ray
    # remote process where the module-level filter has not been applied.
    warnings.filterwarnings("ignore")
    if isinstance(noise, np.ndarray):
        # Report every noise magnitude when an array of magnitudes is given.
        print_str = 'Noise: ['
        for i in range(noise.size):
            print_str += '%e, ' % noise[i]
        print_str += ']'
        print(print_str)
    else:
        print('Noise: %e' % noise)
    print('Training seed: %d' % train_seed)
    warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
    # Choose the test-run length and the synchronization split for the system;
    # test_time == 0 selects the per-system default length.
    if run_opts.system == 'lorenz':
        if run_opts.test_time == 0:
            rkTime_test = 4000
        else:
            rkTime_test = run_opts.test_time
        split_test = run_opts.sync_time
    elif run_opts.system in ['KS', 'KS_d2175']:
        if run_opts.test_time == 0:
            rkTime_test = int(16000)
        else:
            rkTime_test = int(run_opts.test_time)
        split_test = int(run_opts.sync_time)
    # NOTE(review): rkTime_test/split_test are unbound for any other system
    # string — presumably RunOpts validates system upstream; confirm.

    rktest_u_arr_train_nonoise, rktest_u_arr_test, params = get_test_data(
        run_opts, test_stream, overall_idx, rkTime=rkTime_test, split=split_test)
    # np.random.seed(train_seed)
    # Generate the training trajectory from a random initial condition drawn
    # from train_gen (the per-training-seed RNG stream).
    if run_opts.system == 'lorenz':
        ic = train_gen.random(3) * 2 - 1
        rk = NumericalModel(u0 = np.array([ic[0], ic[1], 30 * ic[2]]), tau=run_opts.tau,
                        T=run_opts.train_time + run_opts.discard_time,
                        ttsplit=run_opts.train_time + run_opts.discard_time, system=run_opts.system, params=params)
    elif run_opts.system in ['KS', 'KS_d2175']:
        # 64-gridpoint KS initial condition, centered to zero spatial mean.
        u0 = 0.6 * (train_gen.random(64) * 2 - 1)
        u0 = u0 - np.mean(u0)
        rk = NumericalModel(tau=run_opts.tau, T=run_opts.train_time + run_opts.discard_time,
                        ttsplit=run_opts.train_time + run_opts.discard_time, u0=u0, system=run_opts.system,
                        params=params)
    if run_opts.pmap:
        # Load the precomputed "true" Poincare map maxima used as the reference
        # distribution for the Poincare-map Wasserstein metric.
        true_pmap_max_filename = run_opts.root_folder + \
                                 '%s_tau%0.2f_true_pmap_max.csv' % (run_opts.system, run_opts.tau)
        # Windows MAX_PATH workaround for long absolute paths.
        if os.name == 'nt' and len(true_pmap_max_filename) >= 260:
            true_pmap_max_filename = get_windows_path(true_pmap_max_filename)
        true_pmap_max = np.loadtxt(true_pmap_max_filename, delimiter=',')
        print('Snippet of true poincare map:')
        print(true_pmap_max[:5])
    else:
        # Placeholder when the Poincare-map metric is disabled.
        true_pmap_max = np.zeros(100)

    res_out, train_seed, noise_array, itr = get_res_results(run_opts, res_itr, res_gen, \
                                                            rk, noise, noise_stream, rktest_u_arr_train_nonoise,
                                                            rktest_u_arr_test, \
                                                            rkTime_test, split_test, params, train_seed, true_pmap_max)

    # Re-normalize noise to an array for saving (mirrors get_res_results).
    if not isinstance(noise, np.ndarray):
        noise_array = np.array([noise])
    else:
        noise_array = noise

    res_out.save(run_opts, noise_array, res_itr, train_seed, test_idxs)
    # toc_global = time.perf_counter()
    # print('Total Runtime: %s sec.' % (toc_global - tic_global))

    return rkTime_test, split_test
def find_stability_serial(*args):
    """Run find_stability in the current process (non-Ray fallback).

    Accepts and forwards exactly the same positional arguments as
    find_stability and returns its result unchanged.
    """
    result = find_stability(*args)
    return result
def start_reservoir_test(argv=None, run_opts=None):
    """Main driver for obtaining reservoir performance.

    Processes input arguments (or accepts a pre-built RunOpts), creates
    reproducible random number generator streams for the reservoirs, trainings,
    tests, and noise realizations, then calls find_stability — serially or via
    Ray — for every (train, reservoir, noise) combination whose results are not
    already on disk. Results are saved to the run folder by find_stability.
    """
    # Build RunOpts from argv only when no pre-built options object was given.
    if not isinstance(argv, type(None)) and isinstance(run_opts, type(None)):
        run_opts = RunOpts(argv)
    print(run_opts.run_folder_name)
    if run_opts.machine == 'personal':
        if run_opts.ifray:
            ray.init(num_cpus=run_opts.num_cpus)
    elif run_opts.machine == 'deepthought2':
        if run_opts.ifray:
            # On the cluster, attach to the Ray head node advertised in the env.
            ray.init(address=os.environ["ip_head"])

    # For these traintypes the effective regularization scales with the number
    # of noise realizations.
    if run_opts.traintype in ['normal', 'normalres1', 'normalres2', 'rmean', 'rmeanres1', 'rmeanres2',
                              'rplusq', 'rplusqres1', 'rplusqres2']:
        run_opts.reg_values = run_opts.reg_values * run_opts.noise_realizations
    if run_opts.reg_train_times is None:
        run_opts.reg_train_times = np.array([run_opts.train_time - run_opts.discard_time])

    # Fixed master seeds: each category (reservoir / training / test / noise)
    # gets an independent, reproducible SeedSequence so streams never collide.
    ss_res = np.random.SeedSequence(12)
    ss_train = np.random.SeedSequence(34)
    ss_test = np.random.SeedSequence(56)
    ss_noise = np.random.SeedSequence(78)
    if run_opts.traintype in ['gradient1', 'gradient2',
                              'gradient12'] or 'gradientk' in run_opts.traintype or 'regzerok' in run_opts.traintype:
        # Gradient/LMNT-style traintypes: the noise magnitudes are passed as a
        # whole array to each worker, so jobs are enumerated over
        # (train, reservoir) pairs only.
        res_seeds = ss_res.spawn(run_opts.res_per_test + run_opts.res_start)
        train_seeds = ss_train.spawn(run_opts.num_trains + run_opts.train_start)
        test_seeds = ss_test.spawn(run_opts.num_tests + run_opts.test_start)
        test_idxs = np.arange(run_opts.test_start, run_opts.test_start + run_opts.num_tests)
        res_streams = np.zeros(run_opts.res_per_test * run_opts.num_trains, dtype=object)
        train_streams = np.zeros(run_opts.res_per_test * run_opts.num_trains, dtype=object)
        test_streams = np.zeros((run_opts.res_per_test * run_opts.num_trains, run_opts.num_tests), dtype=object)
        for i in range(run_opts.res_per_test * run_opts.num_trains):
            test_streams[i] = np.array([np.random.default_rng(test_seeds[s]) for \
                                        s in test_idxs], dtype=object)
        # NOTE(review): this array of noise streams is never filled with RNGs
        # before being passed to the workers — confirm these traintypes do not
        # draw from noise_stream.
        noise_streams = np.empty(run_opts.noise_realizations, dtype=object)
        # Enumerate all (train, reservoir) pairs; tr carries training indices,
        # rt carries reservoir indices, each offset by its start index.
        tr, rt = np.meshgrid(np.arange(run_opts.num_trains), np.arange(run_opts.res_per_test))
        tr = tr.flatten() + run_opts.train_start
        rt = rt.flatten() + run_opts.res_start
        for i in range(run_opts.res_per_test * run_opts.num_trains):
            res_streams[i] = np.random.default_rng(res_seeds[rt[i]])
            train_streams[i] = np.random.default_rng(train_seeds[tr[i]])
        # Only (re)run combinations whose final output file is missing.
        incomplete_idxs = []
        for i in range(tr.size):
            if not os.path.exists(os.path.join(run_opts.run_folder_name,
                                               'train_max_rms_res%d_train%d_noise%e_regtrain%d.csv' % (
                                                       rt[i], tr[i], run_opts.noise_values_array[-1],
                                                       run_opts.reg_train_times[-1]))):
                incomplete_idxs.append(i)
        print('Total idxs: %d' % tr.size)
        print('Incomplete idxs: %d' % len(incomplete_idxs))

        print('Starting Ray Computation')
        tic = time.perf_counter()
        if run_opts.ifray:
            out_base = ray.get(
                [find_stability_remote.remote(run_opts, run_opts.noise_values_array, tr[i], train_streams[i], \
                                              rt[i], res_streams[i], test_streams[i], test_idxs, noise_streams, i) for i
                 in incomplete_idxs])
        else:
            out_base = [find_stability_serial(run_opts, run_opts.noise_values_array, tr[i], train_streams[i], \
                                              rt[i], res_streams[i], test_streams[i], test_idxs, noise_streams, i) for i
                        in incomplete_idxs]

    else:
        # All other traintypes: one job per (train, noise value, reservoir)
        # combination, each with its own noise RNG streams.
        res_seeds = ss_res.spawn(run_opts.res_per_test + run_opts.res_start)
        train_seeds = ss_train.spawn(run_opts.num_trains + run_opts.train_start)
        test_seeds = ss_test.spawn(run_opts.num_tests + run_opts.test_start)
        test_idxs = np.arange(run_opts.test_start, run_opts.test_start + run_opts.num_tests)
        noise_seeds = ss_noise.spawn(run_opts.noise_realizations)
        res_streams = np.zeros(run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size,
                               dtype=object)
        train_streams = np.zeros(run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size,
                                 dtype=object)
        test_streams = np.zeros(
            (run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size, run_opts.num_tests),
            dtype=object)
        noise_streams = np.zeros((run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size,
                                  run_opts.noise_realizations), dtype=object)

        # tnr enumerates training indices, ntr noise values, rtn reservoir
        # indices (meshgrid output order follows its input order).
        tnr, ntr, rtn = np.meshgrid(np.arange(run_opts.num_trains), run_opts.noise_values_array,
                                    np.arange(run_opts.res_per_test))
        # BUGFIX: tnr indexes train_seeds and so must be offset by train_start;
        # rtn indexes res_seeds and must be offset by res_start. The offsets
        # were previously swapped (tnr += res_start, rtn += train_start), which
        # selected wrong seeds and mislabeled output files whenever
        # train_start != res_start. This now matches the gradient branch above
        # and the res%d_train%d filename/argument usage below.
        tnr = tnr.flatten() + run_opts.train_start
        ntr = ntr.flatten()
        rtn = rtn.flatten() + run_opts.res_start

        for i in range(run_opts.num_trains * run_opts.res_per_test * run_opts.noise_values_array.size):
            # print(res_seeds[rtn[i]])
            res_streams[i] = np.random.default_rng(res_seeds[rtn[i]])
            train_streams[i] = np.random.default_rng(train_seeds[tnr[i]])
            test_streams[i] = np.array([np.random.default_rng(test_seeds[j]) for \
                                        j in test_idxs])
            noise_streams[i] = np.array([np.random.default_rng(j) for j in noise_seeds])
        # Only (re)run combinations whose final output file is missing.
        incomplete_idxs = []
        for i in range(tnr.size):
            if not os.path.exists(os.path.join(run_opts.run_folder_name,
                                               'train_max_rms_res%d_train%d_noise%e_regtrain%d.csv' % (
                                                       rtn[i], tnr[i], ntr[i], run_opts.reg_train_times[-1]))):
                incomplete_idxs.append(i)

        print('Starting Ray Computation')
        tic = time.perf_counter()
        if run_opts.ifray:
            out_base = ray.get([find_stability_remote.remote(run_opts, ntr[i], tnr[i], train_streams[i], \
                                                             rtn[i], res_streams[i], test_streams[i], test_idxs,
                                                             noise_streams[i], i) for i in incomplete_idxs])
        else:
            out_base = [find_stability_serial(run_opts, ntr[i], tnr[i], train_streams[i], rtn[i], \
                                              res_streams[i], test_streams[i], test_idxs, noise_streams[i], i) for i in
                        incomplete_idxs]
    if len(incomplete_idxs) != 0:
        toc = time.perf_counter()
        runtime = toc - tic
        print('Runtime over all cores: %f sec.' % (runtime))
        print('Ray finished.')
        print('Results Saved')
    else:
        print('No incomplete runs were found. Ending job.')

    if run_opts.ifray:
        ray.shutdown()
def main(argv):
    """Command-line entry point: forward argv to the reservoir test driver."""
    start_reservoir_test(argv=argv)