Statistics of experiments with AR(p), PSD, and IHS for EEG signals

In [2]:
# hide_me_please
from IPython.display import HTML

def this_is_temporary_cell():
    cell_output_is_temporary()

def cell_output_is_temporary():
    HTML("<!--this_is_temporary_output-->")
    print("# this_is_temporary_output")
In [2]:
# hide_me_please
from plotly.offline import init_notebook_mode

init_notebook_mode()

Initialization

In [10]:
# %reset out
In [11]:
# import dill
# dill.dump_session("notebook_env.db")
In [12]:
# import dill
# dill.load_session('notebook_env.db')
In [88]:
# Run one of the following installations:
# !pip install ../timeseries/
# !pip install git+https://github.com/krzpiesiewicz/timeseries
In [14]:
# !pip uninstall timeseries
In [16]:
# imports
import os
import sys
import time
import IPython.display
from IPython.display import HTML
from pprint import pprint
import matplotlib.colors as mcolors
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import scipy.linalg
from scipy.stats import norm
from statsmodels.tsa.stattools import acovf
from statsmodels.tsa.stattools import levinson_durbin_pacf as ar_from_pacf
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.tools import Bunch, add_constant
from statsmodels.compat.numpy import lstsq
from statsmodels.regression.linear_model import OLS

import timeseries as tss
from timeseries import plot_ts
from timeseries.analysis import acf, pacf, plot_hist, plot_acf, plot_pacf, plot_stats
from timeseries.transform import IHSTransformer, get_smoothed
from timeseries.plotting import fig_with_vertical_subplots, ax_settings
from timeseries.forecast.scorings import get_scoring
In [18]:
# suppress warnings and configure matplotlib
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")
plt.rcParams.update(
    {
        "figure.max_open_warning": 0,
        "legend.framealpha": 1
    }
)
In [20]:
# load extensions
%load_ext nb_black
%load_ext autoreload
%autoreload 2
%aimport timeseries
In [21]:
# load data
eeg_data = pd.read_csv("data/eeg_features_raw.csv")
eeg_ts = [eeg_data[col] for col in eeg_data][:-1]
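
As a quick sanity check on what was loaded (a hedged sketch: the CSV layout is assumed to be one column per channel plus a trailing column that the slicing above drops; later cells indicate 32 usable series of roughly 8000 samples each):

print(eeg_data.shape)
print(len(eeg_ts), len(eeg_ts[0]))  # expected: 32 series of equal length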

Normalization

In [23]:
# defining an interval for fitting the normalization
ts = eeg_ts[0]
transformation_train_intv = tss.Interval(ts, 2, 1000)
val_intv = tss.Interval(ts, transformation_train_intv.end)

fig = plot_ts(
    transformation_train_intv.view(ts),
    color="orange",
    name="transformation train interval",
    ax_height=300,
)
plot_ts(val_intv.view(), color="tab:blue", subtitle="", fig=fig)
display(fig)
In [25]:
# standard normalization
standard_trans = [
    IHSTransformer(transformation_train_intv.view(ts), lmb=None) for ts in eeg_ts
]
standard_trans_ts = [trans.transform(ts) for ts, trans in zip(eeg_ts, standard_trans)]
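
According to the comment above, lmb=None makes IHSTransformer act as plain standardization fitted on the training interval. A minimal sketch of that baseline, assuming it is an ordinary z-score (the exact internals of the class are not reproduced here):

def standardize(ts, fit_intv):
    # z-score using the mean and std estimated on the fit interval only
    mu, sigma = fit_intv.view(ts).mean(), fit_intv.view(ts).std()
    return (ts - mu) / sigma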
In [26]:
# IHS normalization
ihs_trans = [
    IHSTransformer(transformation_train_intv.view(ts), verbose=True) for ts in eeg_ts
]
ihs_trans_ts = [trans.transform(ts) for ts, trans in zip(eeg_ts, ihs_trans)]
Order of differencing: 0
MLE of IHS lambda: 9.575000e-02
Order of differencing: 0
MLE of IHS lambda: 8.925000e-02
Order of differencing: 0
MLE of IHS lambda: 1.306250e-01
Order of differencing: 0
MLE of IHS lambda: 6.062500e-02
Order of differencing: 0
MLE of IHS lambda: 1.157813e-01
Order of differencing: 0
MLE of IHS lambda: 2.293750e-01
Order of differencing: 0
MLE of IHS lambda: 1.618750e-01
Order of differencing: 0
MLE of IHS lambda: 6.050000e-02
Order of differencing: 0
MLE of IHS lambda: 9.987500e-02
Order of differencing: 0
MLE of IHS lambda: 1.034375e-01
Order of differencing: 0
MLE of IHS lambda: 1.587500e-01
Order of differencing: 0
MLE of IHS lambda: 8.306250e-02
Order of differencing: 0
MLE of IHS lambda: 8.350000e-02
Order of differencing: 0
MLE of IHS lambda: 1.393750e-01
Order of differencing: 0
MLE of IHS lambda: 7.250000e-02
Order of differencing: 0
MLE of IHS lambda: 9.150000e-02
Order of differencing: 0
MLE of IHS lambda: 1.853125e-01
Order of differencing: 0
MLE of IHS lambda: 9.587500e-02
Order of differencing: 0
MLE of IHS lambda: 1.437500e-01
Order of differencing: 0
MLE of IHS lambda: 1.040625e-01
Order of differencing: 0
MLE of IHS lambda: 1.434375e-01
Order of differencing: 0
MLE of IHS lambda: 1.075000e-01
Order of differencing: 0
MLE of IHS lambda: 2.562500e-02
Order of differencing: 0
MLE of IHS lambda: 1.356250e-01
Order of differencing: 0
MLE of IHS lambda: 8.425000e-02
Order of differencing: 0
MLE of IHS lambda: 4.296875e-02
Order of differencing: 0
MLE of IHS lambda: 6.193750e-02
Order of differencing: 0
MLE of IHS lambda: 1.029687e-01
Order of differencing: 0
MLE of IHS lambda: 1.615625e-01
Order of differencing: 0
MLE of IHS lambda: 1.456250e-01
Order of differencing: 0
MLE of IHS lambda: 6.362500e-02
Order of differencing: 0
MLE of IHS lambda: 1.390625e-01
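
For reference, the transform behind IHSTransformer is the inverse hyperbolic sine. A minimal sketch of its textbook form, assuming the fitted lambda enters as asinh(lambda * y) / lambda (any differencing or rescaling done by the class is not reproduced here):

import numpy as np

def ihs(y, lmb):
    # IHS(y; lambda) = arcsinh(lambda * y) / lambda
    #                = log(lambda * y + sqrt((lambda * y) ** 2 + 1)) / lambda
    return np.arcsinh(lmb * y) / lmb

# hypothetical example using one of the fitted lambdas printed above
print(ihs(np.array([-50.0, -5.0, 0.0, 5.0, 50.0]), 0.09575))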
In [27]:
fig = plot_ts(
    standard_trans_ts[0][5525:5720],
    color="darkblue",
    marker="",
    linewidth=1.2,
    ax_height=300,
)
mask = np.abs(standard_trans_ts[0][5525:5720]) > 2
plot_ts(
    standard_trans_ts[0][5525:5720][mask],
    marker="o",
    #     alpha=0.5,
    markersize=7,
    linestyle="",
    color="darkblue",
    name="standardized",
    fig=fig,
)
plot_ts(ihs_trans_ts[0][5525:5720], color="orange", marker="", linewidth=1.2, fig=fig)
plot_ts(
    ihs_trans_ts[0][5525:5720][mask],
    marker="o",
    markersize=7,
    #     alpha=0.8,
    linestyle="",
    color="orange",
    name="IHS-normalized",
    fig=fig,
    subtitle="",
)
display(fig)

Q-Q Plots for Standard-Normalized, IHS-Normalized, and Theoretical Normal Distributions

In [29]:
# Q-Q plot: EEG data versus a normal distribution
n = 7000
begin = 1000
ts_idx = 5
normal_samples = np.apply_along_axis(norm.ppf, 0, np.linspace(0.0, 1.0, n))
normal_q = pd.Series(normal_samples, index=normal_samples)
standard_q = pd.Series(
    np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
    index=normal_samples,
)
ihs_q = pd.Series(
    np.sort(ihs_trans_ts[ts_idx][begin : begin + n]), index=normal_samples
)
fig = plot_ts(
    normal_q,
    color="black",
    xtitle="normal theoretical quantiles",
    ytitle="data quantiles",
    title="Quantile-Quantile Plot for EEG Data",
    subtitle="EEG data versus a normal distribution",
    width=1000,
    height=1000,
)
markersize = 5
plot_ts(
    standard_q,
    color="tab:blue",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="standard",
)
plot_ts(
    ihs_q,
    color="tab:red",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="IHS",
)
display(fig)
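
A caveat about the theoretical quantiles above: norm.ppf is already vectorized (np.apply_along_axis is not strictly needed), and at the endpoints of np.linspace(0.0, 1.0, n) it returns -inf and +inf, so the two extreme points simply do not render. A common alternative, shown here only as a sketch (not what the cell above does), uses mid-rank plotting positions:

import numpy as np
from scipy.stats import norm

n = 7000
probs = (np.arange(1, n + 1) - 0.5) / n  # (i - 0.5) / n keeps every quantile finite
normal_samples = norm.ppf(probs)
print(normal_samples[:2], normal_samples[-2:])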
In [30]:
# mean Q-Q plot: EEG data versus a normal distribution
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

ts_idx = 5
n = len(begins)

normal_samples = np.apply_along_axis(norm.ppf, 0, np.linspace(0.0, 1.0, ts_len))
normal_q = pd.Series(normal_samples, index=normal_samples)

fig = plot_ts(
    normal_q,
    color="black",
    xtitle="normal theoretical quantiles",
    ytitle="data quantiles",
    title="Mean Quantile-Quantile Plot for EEG Data",
    subtitle="EEG data versus a normal distribution",
    width=1000,
    height=1000,
)
markersize = 5

mean_standard_q = pd.Series(
    np.zeros(ts_len),
    index=normal_samples,
)
mean_ihs_q = pd.Series(np.zeros(ts_len), index=normal_samples)

for begin, end in zip(begins, ends):
    standard_q = pd.Series(
        np.sort(standard_trans_ts[ts_idx][begin:end]),
        index=normal_samples,
    )
    ihs_q = pd.Series(np.sort(ihs_trans_ts[ts_idx][begin:end]), index=normal_samples)
    mean_standard_q += standard_q
    mean_ihs_q += ihs_q

mean_standard_q /= n
mean_ihs_q /= n


plot_ts(
    mean_standard_q,
    color="tab:blue",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="standard",
)
plot_ts(
    mean_ihs_q,
    color="tab:red",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="IHS",
)
display(fig)
In [31]:
len(eeg_ts)
Out[31]:
32
In [32]:
# mean of means Q-Q plot: EEG data versus a normal distribution
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

n = len(begins)

normal_samples = np.apply_along_axis(norm.ppf, 0, np.linspace(0.0, 1.0, ts_len))
normal_q = pd.Series(normal_samples, index=normal_samples)

fig = plot_ts(
    normal_q,
    color="black",
    xtitle="normal theoretical quantiles",
    ytitle="data quantiles",
    title="Mean of Means Quantile-Quantile Plot for EEG Data",
    subtitle="EEG data versus a normal distribution",
    width=1000,
    height=1000,
)
markersize = 5

mean_of_means_standard_q = pd.Series(
    np.zeros(ts_len),
    index=normal_samples,
)
mean_of_means_ihs_q = pd.Series(np.zeros(ts_len), index=normal_samples)

m = len(eeg_ts)
for ts_idx in range(m):
    mean_standard_q = pd.Series(
        np.zeros(ts_len),
        index=normal_samples,
    )
    mean_ihs_q = pd.Series(np.zeros(ts_len), index=normal_samples)

    for begin, end in zip(begins, ends):
        standard_q = pd.Series(
            np.sort(standard_trans_ts[ts_idx][begin:end]),
            index=normal_samples,
        )
        ihs_q = pd.Series(
            np.sort(ihs_trans_ts[ts_idx][begin:end]), index=normal_samples
        )
        mean_standard_q += standard_q
        mean_ihs_q += ihs_q

    mean_standard_q /= n
    mean_ihs_q /= n

    mean_of_means_standard_q += mean_standard_q
    mean_of_means_ihs_q += mean_ihs_q

mean_of_means_standard_q /= m
mean_of_means_ihs_q /= m

plot_ts(
    mean_of_means_standard_q,
    color="tab:blue",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="standard",
)
plot_ts(
    mean_of_means_ihs_q,
    color="tab:red",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="IHS",
)
display(fig)
In [34]:
# Q-Q plot: EEG data versus a normal distribution
n = 7000
begin = 1000
ts_idx = 5
normal_samples = np.apply_along_axis(norm.ppf, 0, np.linspace(0.0, 1.0, n))
normal_q = pd.Series(normal_samples, index=normal_samples)
standard_q = pd.Series(
    np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
    index=normal_samples,
)
ihs_q = pd.Series(
    np.sort(ihs_trans_ts[ts_idx][begin : begin + n]), index=normal_samples
)
fig = plot_ts(
    normal_q,
    color="black",
    engine="plotly",
    name="theoretical normal",
    xaxis_title="normal theoretical quantiles",
    yaxis_title="data quantiles",
    title="Quantile-Quantile Plot for EEG Data",
    subtitle="EEG data versus a normal distribution",
    width=1000,
    height=1000,
)
markersize = 5
plot_ts(
    standard_q,
    color="tab:blue",
    fig=fig,
    name="standard",
)
plot_ts(
    ihs_q,
    color="tab:red",
    fig=fig,
    name="IHS",
)
display(fig)
In [36]:
# Q-Q plot: IHS-transformed data versus standard-normalized data
import matplotlib.pyplot as plt
from scipy.stats import norm

n = 1000
begin = 6000
qs = pd.Series(
    np.sort(ihs_trans_ts[ts_idx][begin : begin + n]),
    index=np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
)
fig = plot_ts(
    pd.Series(
        np.sort(ihs_trans_ts[ts_idx][begin : begin + n]),
        index=np.sort(ihs_trans_ts[ts_idx][begin : begin + n]),
    ),
    color="black",
    xtitle="standard quantiles",
    ytitle="IHS quantiles",
    title="Quantile-Quantile Plot for EEG Data",
    subtitle="IHS-transformerd data versus standard normalized data",
    width=1000,
    height=1000,
)
markersize = 5
plot_ts(
    qs,
    color="tab:green",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
)
fig
Out[36]:

PACF Plotting

In [29]:
# pacf methods definitions and descriptions
ols = "ols"
burg = "burg"
ld_biased = "ld_biased"

pacf_methods_names = {
    ols: "the OLS",
    burg: "Burg's",
    ld_biased: "the Durbin-Levinson",
}

methods_descrs = {
    ols: "the ordinary least squares method with an intercept",
    burg: "Burg's algorithm",
    ld_biased: "the Durbin-Levinson recursion without bias correction",
}
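
For context, the same three estimators are exposed directly by statsmodels.tsa.stattools.pacf; a minimal sketch on white noise, assuming a statsmodels version that accepts these method names (the notebook itself goes through the pacf/plot_pacf wrappers from timeseries.analysis):

import numpy as np
from statsmodels.tsa.stattools import pacf as sm_pacf

x = np.random.default_rng(0).standard_normal(2000)
for method in ["ols", "burg", "ld_biased"]:
    values = sm_pacf(x, nlags=10, method=method)
    print(method, np.round(values[1:4], 3))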
In [79]:
# def plot_pacf_for_intervals and plot_diverse_intervals_experiments
def plot_pacf_for_intervals(
    ts, ts_type, begins, ends, ps, pacf_method, alpha=0.05, nlags=None, **kwargs
):
    fig = None
    for begin, end, p in zip(begins, ends, ps):
        color = (0, 1 - p * 0.5, 0)
        # plot_pacf (from timeseries.analysis) is assumed to return the figure it draws on
        fig = plot_pacf(
            ts[begin:end],
            alpha=alpha,
            nlags=nlags,
            method=pacf_method,
            label=f"[{begin},{end-1}]",
            color=color,
            conf_alpha=0.02,
            **kwargs,
        )
    if fig is not None:
        return fig


def plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    intervals_type,
    pacf_methods,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    const_title = title is not None
    const_subtitle = subtitle is not None
    for pacf_method in pacf_methods:
        method_descr = methods_descrs[pacf_method]
        if not const_title:
            title = f"{pacf_method.upper()} PACF Values Computed on {intervals_type}"
        fig, axs = fig_with_vertical_subplots(
            len(ts_list),
            title=title,
            subplots_titles=True,
            ax_height=450,
            fontsize=15,
        )
        for (ts, ts_type), ax in zip(ts_list, axs):
            if not const_subtitle:
                subtitle = f"{method_descr} for {ts_type}"
            plot_pacf_for_intervals(
                ts,
                ts_type,
                begins,
                ends,
                ps,
                pacf_method,
                ax=ax,
                subtitle=subtitle,
                **kwargs,
            )
        display(fig)
In [80]:
# ld-biased pacf experiments on highly overlapping intervals
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 60
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ld_biased],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [40]:
# OLS pacf experiments on highly overlapping intervals
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ols],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [42]:
# Burg's pacf experiments on highly overlapping intervals
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [burg],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [44]:
# ld-biased pacf experiments
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ld_biased],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [46]:
# burg's pacf experiments
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [burg],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [48]:
# OLS pacf experiments
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ols],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)

PACF Statistics

In [96]:
# def pacf_means_and_stds
def pacf_means_and_stds(ts, begins, ends, pacf_method, alpha=None, nlags=20):
    n = len(begins)
    assert len(ends) == n
    pacf_values = np.zeros((n, nlags + 1), dtype=float)
    for i, (begin, end) in enumerate(zip(begins, ends)):
        res = pacf(ts[begin:end], method=pacf_method, alpha=alpha, nlags=nlags)
        if alpha is None:
            res = (res, None)
        values, conf_intvs = res
        pacf_values[i, :] = values
    mu = np.mean(pacf_values, 0)
    std = np.std(pacf_values, 0)
    return mu, std, conf_intvs
In [59]:
# Mean PACF With Standard Deviations at Lags
for pacf_method in [ld_biased, ols]:
    ts_idx = 5
    nlags = 60
    ts_len = 2000
    step = 750
    begin = 0
    begins = np.arange(begin, 8001 - ts_len, step)
    ends = np.arange(begin + ts_len, 8001, step)
    alpha = 0.05
    conf_value = np.diff(
        pacf(standard_trans_ts[ts_idx][:ts_len], method=pacf_method, alpha=alpha)[1][1]
    )
    mu_standard, std_standard, _ = pacf_means_and_stds(
        standard_trans_ts[ts_idx], begins, ends, pacf_method=pacf_method, nlags=nlags
    )
    std_bar_standard = np.transpose(
        np.array([mu_standard - std_standard, mu_standard + std_standard])
    )
    mu_ihs, std_ihs, _ = pacf_means_and_stds(
        ihs_trans_ts[ts_idx], begins, ends, pacf_method=pacf_method, nlags=nlags
    )
    std_bar_ihs = np.transpose(np.array([mu_ihs - std_ihs, mu_ihs + std_ihs]))
    fig = plot_stats(
        mu_standard,
        std_bar_standard,
        label="standard",
        title="Mean PACF With Standard Deviations at Lags",
        subtitle=f"{pacf_method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        showgrid=True,
    )
    plot_stats(mu_ihs, std_bar_ihs, color="tab:red", label="IHS", fig=fig)
    plot_ts(
        pd.Series([float(conf_value)] * (nlags), index=np.arange(1, nlags + 1)),
        linestyle="--",
        color="gray",
        name=f"{100 * (1-alpha)}% confidence interval",
        fig=fig,
    )
    plot_ts(
        pd.Series([-float(conf_value)] * (nlags), index=np.arange(1, nlags + 1)),
        linestyle="--",
        color="gray",
        fig=fig,
    )
    display(fig)
In [109]:
# mean std of nth lag of pacf
nlags = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_std(tss, pacf_method):
    mean_std_arr = np.zeros(nlags + 1)
    for ts in tss:
        _, std, _ = pacf_means_and_stds(
            ts, begins, ends, pacf_method=pacf_method, nlags=nlags
        )
        mean_std_arr += std
    return mean_std_arr / len(tss)


kwargs = {}
for pacf_method, color in zip(
    [ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]
):
    mean_std_standard = mean_std(standard_trans_ts, pacf_method=pacf_method)
    mean_std_ihs = mean_std(ihs_trans_ts, pacf_method=pacf_method)

    fig = plot_ts(
        mean_std_standard,
        calc_xticks=True,
        major_xstep=1,
        color=color,
        linestyle="-",
        marker="o",
        name=f"{pacf_methods_names[pacf_method]} method, standardized",
        title="Mean Standard Deviation of the n-th lag of the PACF",
        subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} PACFs.\nEach of {len(begins)} PACFs are computed for samples (of length {ts_len}) from the same EEG signal.",
        xtitle="lag",
        height=800,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        mean_std_ihs,
        color=color,
        linestyle="--",
        marker="o",
        name=f"{pacf_methods_names[pacf_method]} method, IHS",
        **kwargs,
    )
# for alpha in [0.05, 0.1]:
#     conf_value = np.diff(
#         pacf(standard_trans_ts[5][:ts_len], method=ld_biased, alpha=alpha)[1][1]
#     )
#     plot_ts(
#         pd.Series([float(conf_value)] * 2, index=[1, nlags]),
#         linestyle="--",
#         color="black",
#         alpha=1 - alpha,
#         name=f"{100 * np.power(1 - alpha, 2)}% confidence interval",
#         fig=fig,
#     )
display(fig)
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]
In [247]:
# mean std of nth lag of pacf
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_std(tss, pacf_method):
    mean_std_arr = np.zeros(nlags + 1)
    for ts in tss:
        _, std, _ = pacf_means_and_stds(
            ts, begins, ends, pacf_method=pacf_method, nlags=nlags
        )
        mean_std_arr += std
    return mean_std_arr / len(tss)


mean_std_standard_lst = []
mean_std_ihs_lst = []
for pacf_method, color in zip(
    [ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]
):
    mean_std_standard_lst.append(mean_std(standard_trans_ts, pacf_method=pacf_method))
    mean_std_ihs_lst.append(mean_std(ihs_trans_ts, pacf_method=pacf_method))
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]
In [249]:
mean_std_standard_lst
Out[249]:
[array([0.        , 0.06884442, 0.06008461, 0.05956977, 0.03641065,
        0.03640015, 0.06469362, 0.10497497, 0.14255949, 0.1779871 ,
        0.17151771, 0.1390479 , 0.1101367 , 0.10702649, 0.10660745,
        0.09187245, 0.07833295, 0.07528637, 0.08217776, 0.06814858,
        0.06248961, 0.06287291, 0.05589284, 0.05745575, 0.05603281,
        0.04708649, 0.0551808 , 0.04521974, 0.04650722, 0.05149424,
        0.04163647, 0.04416145, 0.04780465, 0.03612936, 0.04418514,
        0.04489971, 0.0359236 , 0.04459881, 0.04087242, 0.03750268,
        0.04310225, 0.03408193, 0.03893691, 0.0452443 , 0.03710312,
        0.04287484, 0.04215425, 0.03842098, 0.04431751, 0.04125512,
        0.04008756, 0.04550197, 0.03971834, 0.03924151, 0.03577931,
        0.03104239, 0.03248604, 0.03098656, 0.02904249, 0.03309645,
        0.02771732, 0.02811536, 0.0277012 , 0.02553299, 0.026126  ,
        0.02463593, 0.02652335, 0.02747295, 0.02532692, 0.02818341,
        0.02685351, 0.02449602, 0.02652022, 0.02360093, 0.02346183,
        0.02563134, 0.02220003, 0.02510749, 0.02420962, 0.01900614,
        0.02039413, 0.02256535, 0.02120094, 0.01889889, 0.02280179,
        0.02296092, 0.01975477, 0.02127303, 0.01783942, 0.01994757,
        0.02101902, 0.01927789, 0.01934253, 0.01984837, 0.01726244,
        0.01725672, 0.02132061, 0.018968  , 0.01804557, 0.0213213 ,
        0.01911132]),
 array([0.        , 0.06834219, 0.06050395, 0.06029183, 0.02811839,
        0.02483879, 0.01856406, 0.01665748, 0.01307142, 0.017635  ,
        0.01546537, 0.01356109, 0.01242241, 0.01339315, 0.01498671,
        0.01409847, 0.01220547, 0.0132194 , 0.01288843, 0.01642791,
        0.01183768, 0.01517196, 0.01107391, 0.01782081, 0.0140062 ,
        0.01303496, 0.01315321, 0.01431254, 0.01253496, 0.01310601,
        0.01205415, 0.01246617, 0.01291965, 0.01392688, 0.01294471,
        0.01454068, 0.01663214, 0.0228325 , 0.03029092, 0.02824991,
        0.01929369, 0.01430436, 0.02220969, 0.03124958, 0.04075531,
        0.02911302, 0.01799775, 0.01990113, 0.03312543, 0.02810393,
        0.03117046, 0.02535519, 0.03354963, 0.03878176, 0.03573591,
        0.04143104, 0.04292328, 0.0484086 , 0.05519348, 0.04576012,
        0.05491505, 0.05158605, 0.06014293, 0.05423854, 0.05514643,
        0.06443518, 0.05674118, 0.05786033, 0.06562554, 0.06119671,
        0.06791183, 0.06076305, 0.06065572, 0.06132231, 0.05802295,
        0.06046182, 0.06210863, 0.05621077, 0.0604689 , 0.0598553 ,
        0.05670996, 0.05953986, 0.05155958, 0.05514452, 0.05311776,
        0.04873861, 0.04436133, 0.04630733, 0.04243492, 0.04333256,
        0.0359166 , 0.03717592, 0.03808852, 0.03461052, 0.03160854,
        0.02949729, 0.0315837 , 0.03474528, 0.03119681, 0.02931005,
        0.02975839]),
 array([0.        , 0.06910158, 0.06043187, 0.05997202, 0.02895086,
        0.02437364, 0.01964296, 0.01659867, 0.01328594, 0.01744706,
        0.0171794 , 0.01289267, 0.01399181, 0.01353166, 0.01528892,
        0.01535227, 0.01223526, 0.01336451, 0.01300638, 0.01738575,
        0.01247989, 0.01570446, 0.01167201, 0.01767777, 0.0164243 ,
        0.01330198, 0.01314208, 0.01406053, 0.01411846, 0.01300724,
        0.01414249, 0.01316716, 0.01425646, 0.01480475, 0.01372799,
        0.013598  , 0.01774399, 0.02446032, 0.02992696, 0.02933983,
        0.0187776 , 0.01461124, 0.02338573, 0.02986815, 0.04072699,
        0.02606576, 0.01343696, 0.01695249, 0.02486913, 0.02365048,
        0.01806483, 0.01770316, 0.02394964, 0.02407358, 0.02612113,
        0.01985364, 0.02155834, 0.02416014, 0.029786  , 0.01981406,
        0.02233084, 0.01978554, 0.02330169, 0.01702942, 0.01957632,
        0.02364687, 0.01668483, 0.0166756 , 0.02065855, 0.02341772,
        0.01994759, 0.01933099, 0.01890621, 0.02122679, 0.02268258,
        0.01458627, 0.02084995, 0.02065513, 0.02700453, 0.01872854,
        0.02077141, 0.01836126, 0.01771518, 0.02120855, 0.02109958,
        0.02049915, 0.01581773, 0.01847886, 0.02166008, 0.02433648,
        0.01635201, 0.02144695, 0.02183053, 0.02239252, 0.01501103,
        0.01866784, 0.01762667, 0.0245958 , 0.01883803, 0.01756493,
        0.0208851 ])]
In [268]:
# mean std of nth lag of pacf
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_std(tss, pacf_method):
    mean_std_arr = np.zeros(nlags + 1)
    for ts in tss:
        _, std, _ = pacf_means_and_stds(
            ts, begins, ends, pacf_method=pacf_method, nlags=nlags
        )
        mean_std_arr += std
    return mean_std_arr / len(tss)


kwargs = {}
for pacf_method, color, (mean_std_standard, mean_std_ihs) in zip(
    [ld_biased, burg, ols],
    ["tab:blue", "darkred", "tab:orange"],
    zip(mean_std_standard_lst, mean_std_ihs_lst),
):
    fig = plot_ts(
        get_smoothed(pd.Series(mean_std_standard), std=2, only_prevs=False),
        color=color,
        linestyle="-",
        linewidth=3,
        alpha=0.5,
        #         marker="o",
        name=f"{pacf_method} method on standard",
        title="Mean Standard Deviation of the n-th lag of the PACF",
        subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consiting of {len(begins)} PACFs.\nEach of {len(begins)} PACFs are computed for samples (of length {ts_len}) from the same EEG signal.",
        xtitle="lag",
        height=800,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        get_smoothed(pd.Series(mean_std_ihs), std=2, only_prevs=False),
        color=color,
        linestyle="--",
        linewidth=3,
        alpha=0.5,
        #         marker="o",
        name=f"{pacf_method} method on IHS",
        **kwargs,
    )
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [297]:
# mean std of nth lag of pacf
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_std(tss, pacf_method):
    mean_std_arr = np.zeros(nlags + 1)
    for ts in tss:
        _, std, _ = pacf_means_and_stds(
            ts, begins, ends, pacf_method=pacf_method, nlags=nlags
        )
        mean_std_arr += std
    return mean_std_arr / len(tss)


kwargs = dict(
    title="Mean Standard Deviation of the n-th lag of the PACF",
    subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consiting of {len(begins)} PACFs.\nEach of {len(begins)} PACFs are computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="lag",
    height=500,
)
for pacf_method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
    [ld_biased, burg, ols],
    [("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
    zip(mean_std_standard_lst, mean_std_ihs_lst),
):
    fig = plot_ts(
        get_smoothed(pd.Series(mean_std_standard), std=2, only_prevs=False),
        color="darkblue",
        linestyle=linestyle,
        linewidth=linewidth,
        #         alpha=0.5,
        #         marker="o",
        name=f"{pacf_methods_names[pacf_method]} method for standardized",
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        get_smoothed(pd.Series(mean_std_ihs), std=2, only_prevs=False),
        color="tab:orange",
        linestyle=linestyle,
        linewidth=linewidth,
        #         alpha=0.5,
        #         marker="o",
        name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
        **kwargs,
    )
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]

Autocorrelation Statistics

In [30]:
# mean_autocorr
def mean_autocorr(begins, ends, title=None, subtitle=None, nlags=20, alpha=0.05):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    ts_len = ends[0] - begins[0]
    if title is None:
        title = "Mean Autocorrelation"
    kwargs = dict(title=title, subtitle=subtitle, showgrid=True)
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standard", "tab:blue"),
        (ihs_trans_ts, "IHS", "tab:red"),
    ]:
        autocorrs = np.zeros((len(begins), nlags + 1))
        for i, (begin, end) in enumerate(zip(begins, ends)):
            autocorrs[i, :] = acovf(ts[ts_idx][begin:end], fft=False)[: nlags + 1]
            autocorrs[i, :] /= autocorrs[i, 0]
        mu = np.mean(autocorrs, 0)
        std = np.std(autocorrs, 0)
        fig = plot_stats(
            mu,
            std=std,
            xs=np.arange(0, nlags + 1),
            label=ts_name,
            color=color,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
    plot_ts(
        pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        name=f"{100 * (1-alpha)}% confidence interval",
        fig=fig,
    )
    plot_ts(
        pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        fig=fig,
    )
    display(fig)
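
The dashed band drawn by mean_autocorr is the usual large-sample white-noise bound z_{1-alpha/2} / sqrt(T) for a sample autocorrelation. A quick sketch of its value for the settings used below:

import numpy as np
from scipy.stats import norm

alpha, ts_len = 0.05, 2000
print(round(norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len), 4))  # about 0.044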
In [44]:
# Mean of the Autocorrelations of Highly Overlapping Intervals
nlags = 60
alpha = 0.05
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 20, 1))
begins = [begin + n for n in range(20)]

mean_autocorr(
    begins,
    ends,
    title=f"Mean of the Autocovariances of Highly Overlapping Intervals",
    subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    nlags=nlags,
)
Intervals: [[4000, 6000], [4001, 6001], [4002, 6002], [4003, 6003], [4004, 6004], [4005, 6005], [4006, 6006], [4007, 6007], [4008, 6008], [4009, 6009], [4010, 6010], [4011, 6011], [4012, 6012], [4013, 6013], [4014, 6014], [4015, 6015], [4016, 6016], [4017, 6017], [4018, 6018], [4019, 6019]]
In [45]:
# Mean Autocorrelations
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

mean_autocorr(
    begins,
    ends,
    subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    nlags=nlags,
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [46]:
# Mean Autocorrelations
alpha = 0.05
nlags = 60
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

mean_autocorr(
    begins,
    ends,
    subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    nlags=nlags,
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [60]:
# mean_autocorr_diffs
def mean_autocorr_diffs(
    begins, ends, ts_idx, title=None, subtitle=None, nlags=20, plot=True, alpha=0.05
):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    ts_len = ends[0] - begins[0]
    if title is None:
        title = "Mean Autocorrelation Diffs"
    kwargs = dict(title=title, subtitle=subtitle, showgrid=True)
    fig = None
    autocorrs = np.zeros((2, len(begins), nlags + 1))
    autocorrs_diffs = np.zeros((len(begins), nlags + 1))
    for idx, ts in enumerate(
        [
            standard_trans_ts,
            ihs_trans_ts,
        ]
    ):
        for i, (begin, end) in enumerate(zip(begins, ends)):
            autocorrs[idx, i, :] = np.abs(
                acovf(ts[ts_idx][begin:end], fft=False)[: nlags + 1]
            )
            autocorrs[idx, i, :] /= autocorrs[idx, i, 0]
    for i in range(len(begins)):
        autocorrs_diffs[i, :] = autocorrs[0, i, :] - autocorrs[1, i, :]
    mu = np.mean(autocorrs_diffs, 0)
    std = np.std(autocorrs_diffs, 0)
    if plot:
        fig = plot_stats(
            mu,
            std=std,
            xs=np.arange(0, nlags + 1),
            #             label="",
            color="tab:blue",
            **kwargs,
        )
        kwargs = dict(fig=fig)
        conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
        plot_ts(
            pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
            linestyle="--",
            color="gray",
            name=f"{100 * (1-alpha)}% confidence interval",
            fig=fig,
        )
        plot_ts(
            pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
            linestyle="--",
            color="gray",
            fig=fig,
        )
        display(fig)
    return mu, std


nlags = 20
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mean_autocorr_diffs(begins, ends, ts_idx, title=None, subtitle=None, nlags=nlags)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
Out[60]:
(array([ 0.        ,  0.0505449 ,  0.0681595 , -0.00190475, -0.0426218 ,
        -0.00895878,  0.07238189,  0.06215635,  0.03873832,  0.02976874,
         0.00723982, -0.01357157, -0.01750296, -0.02799604, -0.03609785,
        -0.03430548, -0.03746041, -0.03851301, -0.027263  ,  0.01791099,
         0.01507431]),
 array([0.        , 0.01051261, 0.01580662, 0.01617799, 0.02628354,
        0.02877097, 0.02931422, 0.01972512, 0.01122015, 0.01110253,
        0.01735744, 0.02122177, 0.02620362, 0.0313107 , 0.02724007,
        0.02515155, 0.02871076, 0.02502282, 0.01442835, 0.02251427,
        0.00803729]))
In [78]:
# mean_autocorr_diffs_averaged
def mean_autocorr_diffs_averaged(
    begins, ends, title=None, subtitle=None, nlags=20, alpha=0.05, **kwargs
):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    ts_len = ends[0] - begins[0]
    if title is None:
        title = "Mean Autocorrelation Diffs - Averaged"
    #     kwargs = dict(title=title, subtitle=subtitle, showgrid=True)
    index_values = np.arange(0, nlags + 1)
    kwargs["title"] = title
    kwargs["subtitle"] = subtitle
    kwargs["showgrid"] = True
    kwargs["calc_xticks"] = True
    kwargs["minor_xticks"] = 1
    kwargs["calc_grid"] = True
    kwargs["major_xstep"] = 1
    kwargs["index_values"] = index_values
    fig = None
    n = len(standard_trans_ts)
    means = np.zeros((n, nlags + 1))
    stds = np.zeros((n, nlags + 1))
    for ts_idx in range(n):
        mu, std = mean_autocorr_diffs(begins, ends, ts_idx, nlags=nlags, plot=False)
        means[ts_idx, :] = mu
        stds[ts_idx, :] = std
    mu = np.mean(means, 0)
    #     std = np.mean(stds, 0)
    std = np.std(means, 0)
    fig = plot_stats(
        mu,
        std=std,
        xs=index_values,
        #             label="",
        color="darkblue",
        conf_alpha=0.15,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
    plot_ts(
        pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        name=f"{100 * (1-alpha)}% confidence interval",
        fig=fig,
    )
    plot_ts(
        pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        fig=fig,
    )
    display(fig)


nlags = 20
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mean_autocorr_diffs_averaged(
    begins, ends, title="", ax_height=300, subtitle=None, nlags=nlags
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]

Conditioning of Autocorrelation Matrices

In [31]:
# cov_matrix
# [γ(i−j)]_{i,j=1}^{n}
def cov_matrix(acovf):
    rev_acovf = np.flipud(acovf)
    n = len(acovf)
    m = np.zeros((n, n))
    for i in range(n):
        m[i, :i] = rev_acovf[n - i - 1 : -1]
        m[i, i:] = acovf[: n - i]
    return m


# print("cov_matrix(np.array([1, 0.5, 0.7, 0.1]))")
# cov_matrix(np.array([1, 0.5, 0.7, 0.1]))
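
The matrix built by cov_matrix is a symmetric Toeplitz matrix, so scipy.linalg.toeplitz (imported at the top of the notebook) yields the same result; a quick sanity check, as a sketch:

import numpy as np
import scipy.linalg

acov = np.array([1.0, 0.5, 0.7, 0.1])
assert np.allclose(cov_matrix(acov), scipy.linalg.toeplitz(acov))
print(scipy.linalg.toeplitz(acov))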
In [32]:
# conditioning of the matrix computed using autocovariance with and without fft
ts_idx = 5
nlags = 60
for fft in [True, False]:
    standard_aconvf = acovf(standard_trans_ts[ts_idx], fft=fft)[:nlags]
    standard_aconvf /= standard_aconvf[0]
    ihs_aconvf = acovf(ihs_trans_ts[ts_idx], fft=fft)[:nlags]
    ihs_aconvf /= ihs_aconvf[0]
    standard_cov = cov_matrix(standard_aconvf)
    ihs_cov = cov_matrix(ihs_aconvf)
    print(f"fft={fft}")
    for q in [1, 2]:
        for m, name in [
            (standard_cov, "standard matrix:"),
            (ihs_cov, "ihs matrix:     "),
        ]:
            print(f"{q}-norm of {name} {np.linalg.cond(m, q)}")
fft=True
1-norm of standard matrix: 574200.562303358
1-norm of ihs matrix:      8821.417663716567
2-norm of standard matrix: 245607.37970942413
2-norm of ihs matrix:      4873.612282171252
fft=False
1-norm of standard matrix: 574200.5623012796
1-norm of ihs matrix:      8821.417663718816
2-norm of standard matrix: 245607.37970518306
2-norm of ihs matrix:      4873.612282173697
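
For reference, np.linalg.cond(m, q) reported above is the q-norm condition number ||m||_q * ||m^{-1}||_q. A small check on a toy matrix, as a sketch:

import numpy as np

m = np.array([[2.0, 0.5], [0.5, 1.0]])
for q in [1, 2]:
    direct = np.linalg.cond(m, q)
    manual = np.linalg.norm(m, q) * np.linalg.norm(np.linalg.inv(m), q)
    assert np.isclose(direct, manual)
    print(q, round(direct, 4))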
In [33]:
# plot_mean_cond
def plot_mean_cond(begins, ends, title=None, subtitle=None, p=1, nlags=20):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    if title is None:
        title = f"Mean Conditioning {p}-norm"
    kwargs = dict(
        title=title,
        subtitle=subtitle,
        calc_xticks=True,
        yscale="log",
        xtitle="lag (matrix of size: lag x lag)",
    )
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standard", "tab:blue"),
        (ihs_trans_ts, "IHS", "tab:red"),
    ]:
        mean_conds = np.zeros(nlags)
        std_conds = np.zeros(nlags)
        for lags in np.arange(1, nlags + 1):
            conds = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[ts_idx][begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                conds.append(np.linalg.cond(mcov, p))
            mean_conds[lags - 1] = np.mean(conds)
            std_conds[lags - 1] = np.std(conds)
        fig = plot_stats(
            mean_conds,
            xs=np.arange(1, nlags + 1),
            std=std_conds[0:],
            std_xs=np.arange(1, nlags + 1),
            fill_along_axis=False,
            fill_only_positive=False,
            label=ts_name,
            color=color,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    ax_settings(fig=fig, showgrid=True)
    display(fig)
In [27]:
# Mean of the Matrices Conditioning for Highly Overlapping Intervals
nlags = 60
alpha = 0.05
ts_idx = 10
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 20, 1))
begins = [begin + n for n in range(20)]
p = 2

plot_mean_cond(
    begins,
    ends,
    title=f"Mean of the Matrices Conditioning for Highly Overlapping Intervals",
    subtitle=f"(derrived from {p}-norm), {len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    p=p,
    nlags=nlags,
)
Intervals: [[4000, 6000], [4001, 6001], [4002, 6002], [4003, 6003], [4004, 6004], [4005, 6005], [4006, 6006], [4007, 6007], [4008, 6008], [4009, 6009], [4010, 6010], [4011, 6011], [4012, 6012], [4013, 6013], [4014, 6014], [4015, 6015], [4016, 6016], [4017, 6017], [4018, 6018], [4019, 6019]]
In [53]:
# Mean Conditioning
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2

plot_mean_cond(
    begins,
    ends,
    subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    p=p,
    nlags=nlags,
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [54]:
# Mean Conditioning
alpha = 0.05
nlags = 60
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2

plot_mean_cond(
    begins,
    ends,
    subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    p=p,
    nlags=nlags,
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [35]:
# mean_of_the_means_of_conds
# mean_of_the_stds_of_means

nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_of_conds_means_and_stds(tss, p=1):
    #     tss = tss[0:2]
    mean_conds = np.zeros((len(tss), nlags + 1))
    std_conds = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            conds = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                conds.append(np.linalg.cond(mcov, p))
            mean_conds[i][lags] = np.mean(conds)
            std_conds[i][lags] = np.std(conds)
    return (
        pd.Series(np.around(np.mean(mean_conds, 0)[1:], 1), index=range(1, nlags + 1)),
        pd.Series(np.around(np.mean(std_conds, 0), 1)[1:], index=range(1, nlags + 1)),
    )


p = 2
kwargs = dict(
    title="Mean of the Means and Standard Deviations of Matrix Conditioning",
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of conditionings({q}-norm derrived) of {len(begins)} "
    f"covariance matrices of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    height=800,
    calc_xticks=True,
)
fig = None
for ts, ts_name, color in [
    (standard_trans_ts, "standard", "tab:blue"),
    (ihs_trans_ts, "IHS", "tab:red"),
]:
    mean_conds, std_conds = mean_of_conds_means_and_stds(ts, p=p)
    std_bar = np.transpose(
        np.array([np.around(mean_conds - std_conds, 1), mean_conds + std_conds])
    )
    fig = plot_stats(
        mean_conds.values,
        xs=mean_conds.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [117]:
%%html
<!--hide_me_please-->
<p class="showcode" id="alejajhagheiuhr982r3hfuwiaa"><a href="javascript:code_toggle('#alejajhagheiuhr982r3hfuwi')">show codes in the notebook</a></p>
<p class="hidecode" id="alejajhagheiuhr982r3hfuwi"><a href="javascript:code_toggle('#alejajhagheiuhr982r3hfuwiaa')">hide codes in the notebook</a></p>

2-Norms of Autocorrelation Matrices¶
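The cells in this subsection plot ||R_n||₂ and ||R_n⁻¹||₂ separately; their product is the 2-norm condition number shown above, and for a symmetric positive-definite autocorrelation matrix they equal the largest eigenvalue and the reciprocal of the smallest eigenvalue, respectively. A small sanity check on a toy matrix (illustrative only, not one of the EEG matrices):

import numpy as np
from scipy.linalg import toeplitz

R = toeplitz([1.0, 0.6, 0.3])                     # toy 3x3 autocorrelation matrix
eig = np.linalg.eigvalsh(R)                       # eigenvalues of the symmetric matrix
norm_R = np.linalg.norm(R, 2)                     # equals eig.max()
norm_Rinv = np.linalg.norm(np.linalg.inv(R), 2)   # equals 1 / eig.min()
assert np.isclose(norm_R * norm_Rinv, np.linalg.cond(R, 2))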

In [34]:
# means_and_stds_of_1-norm_and_2-norm
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    #     tss = tss[0:2]
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.mean(std_norms, 0)[1:], index=range(1, nlags + 1)),
    )



p = 2
kwargs = dict(
    title=f"Mean of the Means and Standard Deviations of Matrices' {p}-Norms",
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of {len(begins)} "
    f"covariance matrices' {p}-norms of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    height=800,
    calc_xticks=True,
)
fig = None
for ts, ts_name, color, inv in [
    (standard_trans_ts, "||A|| for standard", "tab:blue", False),
    (standard_trans_ts, "||A^-1|| for standard", "skyblue", True),
    (ihs_trans_ts, "||A|| for IHS", "tab:red", False),
    (ihs_trans_ts, "||A^-1|| for IHS", "tab:orange", True),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    #     std_bar = np.transpose(
    #         np.array([np.around(mean_norms - std_norms, 1), mean_norms + std_norms])
    #     )
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [35]:
# means_and_stds_of_1-norm_and_2-norm
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    #     tss = tss[0:2]
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
    )



p = 2
kwargs = dict(
    title=f"Mean of the Means and Standard Deviations of Matrices' {p}-Norms",
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of {len(begins)} "
    f"covariance matrices' {p}-norms of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    height=800,
    calc_xticks=True,
)
fig = None
for ts, ts_name, color, inv in [
    (standard_trans_ts, "||A|| for standard", "tab:blue", False),
    (standard_trans_ts, "||A^-1|| for standard", "skyblue", True),
    (ihs_trans_ts, "||A|| for IHS", "tab:red", False),
    (ihs_trans_ts, "||A^-1|| for IHS", "tab:orange", True),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    #     std_bar = np.transpose(
    #         np.array([np.around(mean_norms - std_norms, 1), mean_norms + std_norms])
    #     )
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [31]:
# means_and_stds_of_1-norm_and_2-norm
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    #     tss = tss[0:2]
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
    )



p = 2
kwargs = dict(
    title=f"Mean of the Means and Standard Deviations of Matrices' {p}-Norms",
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of {len(begins)} "
    f"covariance matrices' {p}-norms of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    ax_height=400,
    calc_xticks=True,
)
fig = None
for ts, ts_name, color, inv in [
    (standard_trans_ts, "$||R_n||$ for standard", "tab:blue", False),
    (standard_trans_ts, "$||R_n^{-1}||$ for standard", "skyblue", True),
    (ihs_trans_ts, "$||R_n||_2$ for IHS", "tab:red", False),
    (ihs_trans_ts, "$||R_n^-1||_2$ for IHS", "tab:orange", True),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    #     std_bar = np.transpose(
    #         np.array([np.around(mean_norms - std_norms, 1), mean_norms + std_norms])
    #     )
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]
In [242]:
%%html
<!--hide_me_please-->
<p class="showcode" id="1alejajhagheiuhr982r3hfuwiaa"><a href="javascript:code_toggle('#1alejajhagheiuhr982r3hfuwi')">show codes in the notebook</a></p>
<p class="hidecode" id="1alejajhagheiuhr982r3hfuwi"><a href="javascript:code_toggle('#1alejajhagheiuhr982r3hfuwiaa')">hide codes in the notebook</a></p>

Differences Between Autocorrelation Matrices¶

In [36]:
m = np.array([[1, 2, 3], [4, 5, 6], [100, 1, 2]])
m
Out[36]:
array([[  1,   2,   3],
       [  4,   5,   6],
       [100,   1,   2]])
In [42]:
x = np.sum(m, axis=1)
x
Out[42]:
array([  6,  15, 103])
In [39]:
m1 = np.array([[4, 3, 5], [6, 8, 10], [120, 5, 3]])
y = np.sum(m1, axis=1)
y
Out[39]:
array([ 12,  24, 128])
In [43]:
OLS(y, x).fit().params
Out[43]:
array([1.2526219])
In [49]:
np.min(np.linalg.eigvals(m))
Out[49]:
-14.276055588948228
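The three scratch cells above rehearse the ingredient used in the next subsection: the scaling α is estimated by a no-intercept OLS of the row sums of one matrix on the row sums of another, which for a single regressor reduces to a dot-product ratio. A quick check with the same toy matrices (illustrative only):

import numpy as np

m = np.array([[1, 2, 3], [4, 5, 6], [100, 1, 2]])
m1 = np.array([[4, 3, 5], [6, 8, 10], [120, 5, 3]])
x = m.sum(axis=1)           # row sums: [6, 15, 103]
y = m1.sum(axis=1)          # row sums: [12, 24, 128]
alpha = x @ y / (x @ x)     # closed form of OLS(y, x).fit().params[0] with no intercept
print(alpha)                # ~1.2526, matching Out[43] above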

Averaged over Intervals¶

In [151]:
# mean_of_alphas_and_deltas
def mean_of_alphas_and_deltas(nlags, ts_idx):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    stn_tss = [standard_trans_ts[ts_idx]]
    ihs_tss = [ihs_trans_ts[ts_idx]]

    mean_alphas = np.zeros((len(stn_tss), nlags + 1))
    mean_deltas = np.zeros((len(stn_tss), nlags + 1))
    mean_delta_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_one_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_min_eig_eye_pl_deltas = np.zeros((len(stn_tss), nlags + 1))
    mean_min_lmb_pl_1mlmb_alphas = np.zeros((len(stn_tss), nlags + 1))
    mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    mean_stn_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_bound_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_bound_min_lambdas_without_delta_ineq = np.zeros((len(stn_tss), nlags + 1))
    mean_ihs_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_ihs_min_lambdas_without_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_increases = np.zeros((len(stn_tss), nlags + 1))
    std_alphas = np.zeros((len(stn_tss), nlags + 1))
    std_deltas = np.zeros((len(stn_tss), nlags + 1))
    std_delta_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    std_one_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    std_min_eig_eye_pl_deltas = np.zeros((len(stn_tss), nlags + 1))
    std_min_lmb_pl_1mlmb_alphas = np.zeros((len(stn_tss), nlags + 1))
    std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    std_stn_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    std_bound_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    std_bound_min_lambdas_without_delta_ineq = np.zeros((len(stn_tss), nlags + 1))
    std_ihs_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    std_ihs_min_lambdas_without_delta = np.zeros((len(stn_tss), nlags + 1))
    std_increases = np.zeros((len(stn_tss), nlags + 1))

    for idx, (standard_ts, ihs_ts) in enumerate(zip(stn_tss, ihs_tss)):
        standard_mcovs = [None] * len(begins)
        ihs_mcovs = [None] * len(begins)
        for ts, mcovs in zip([standard_ts, ihs_ts], [standard_mcovs, ihs_mcovs]):
            for i, (begin, end) in enumerate(zip(begins, ends)):
                acov = acovf(ts[begin:end], fft=False)[:nlags]
                acov /= acov[0]
                mcovs[i] = cov_matrix(acov)
        for lags in range(2, nlags + 1):
            alphas = []
            deltas = []
            delta_over_one_pl_delta = []
            one_over_one_pl_delta = []
            min_eig_eye_pl_deltas = []
            min_lmb_pl_1mlmb_alphas = []
            one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = []
            min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = []
            stn_min_lambdas = []
            bound_min_lambdas = []
            bound_min_lambdas_without_delta_ineq = []
            ihs_min_lambdas = []
            ihs_min_lambdas_without_delta = []
            increases = []
            for i in range(len(begins)):
                eye = np.eye(lags)
                stn_mcov = standard_mcovs[i][:lags, :lags]
                ihs_mcov = ihs_mcovs[i][:lags, :lags]
                x = np.sum(stn_mcov - eye, axis=1)
                y = np.sum(ihs_mcov - eye, axis=1)
                alpha = OLS(y, x).fit().params[0]
                delta = ihs_mcov - eye - alpha * (stn_mcov - eye)
                delta_norm = np.linalg.norm(delta, ord=2)
                min_lambda = np.min(np.linalg.eigvals(stn_mcov))
                delta_over_1_pl_delta = delta_norm / (1 + delta_norm)
                one_over_1_pl_delta = 1 / (1 + delta_norm)
                increase = (1 - alpha) * (1 - min_lambda)
                min_eig_eye_pl_delta = np.min(np.linalg.eigvals(eye + delta))
                min_lmb_pl_1mlmb_alpha = min_lambda + (1 - min_lambda) * alpha
                alphas.append(alpha)
                deltas.append(delta_norm)
                delta_over_one_pl_delta.append(delta_over_1_pl_delta)
                one_over_one_pl_delta.append(one_over_1_pl_delta)
                min_eig_eye_pl_deltas.append(min_eig_eye_pl_delta)
                min_lmb_pl_1mlmb_alphas.append(min_lmb_pl_1mlmb_alpha)
                one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.append(
                    one_over_1_pl_delta - min_lmb_pl_1mlmb_alpha
                )
                min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.append(
                    #                     np.linalg.norm(np.linalg.inv(eye + delta), ord=2)
                    min_eig_eye_pl_delta
                    - min_lmb_pl_1mlmb_alpha
                )
                stn_min_lambdas.append(min_lambda)
                bound_min_lambdas.append(min_lambda + increase - delta_over_1_pl_delta)
                bound_min_lambdas_without_delta_ineq.append(
                    alpha * min_lambda - alpha + min_eig_eye_pl_delta
                )
                ihs_min_lambdas.append(np.min(np.linalg.eigvals(ihs_mcov)))
                ihs_min_lambdas_without_delta.append(
                    np.min(np.linalg.eigvals(eye + alpha * (stn_mcov - eye)))
                )
                increases.append(increase)
            mean_alphas[idx][lags] = np.mean(alphas)
            mean_deltas[idx][lags] = np.mean(deltas)
            mean_delta_over_one_pl_delta[idx][lags] = np.mean(delta_over_one_pl_delta)
            mean_one_over_one_pl_delta[idx][lags] = np.mean(one_over_one_pl_delta)
            mean_min_eig_eye_pl_deltas[idx][lags] = np.mean(min_eig_eye_pl_deltas)
            mean_min_lmb_pl_1mlmb_alphas[idx][lags] = np.mean(min_lmb_pl_1mlmb_alphas)
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas[idx][
                lags
            ] = np.mean(one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas)
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas[idx][
                lags
            ] = np.mean(min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas)
            mean_stn_min_lambdas[idx][lags] = np.mean(stn_min_lambdas)
            mean_bound_min_lambdas[idx][lags] = np.mean(bound_min_lambdas)
            mean_bound_min_lambdas_without_delta_ineq[idx][lags] = np.mean(
                bound_min_lambdas_without_delta_ineq
            )
            mean_ihs_min_lambdas[idx][lags] = np.mean(ihs_min_lambdas)
            mean_ihs_min_lambdas_without_delta[idx][lags] = np.mean(
                ihs_min_lambdas_without_delta
            )
            mean_increases[idx][lags] = np.mean(increases)

            std_alphas[idx][lags] = np.std(alphas)
            std_deltas[idx][lags] = np.std(deltas)
            std_delta_over_one_pl_delta[idx][lags] = np.std(delta_over_one_pl_delta)
            std_one_over_one_pl_delta[idx][lags] = np.std(one_over_one_pl_delta)
            std_min_eig_eye_pl_deltas[idx][lags] = np.std(min_eig_eye_pl_deltas)
            std_min_lmb_pl_1mlmb_alphas[idx][lags] = np.std(min_lmb_pl_1mlmb_alphas)
            std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas[idx][lags] = np.std(
                one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            )
            std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas[idx][lags] = np.std(
                min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            )
            std_stn_min_lambdas[idx][lags] = np.std(stn_min_lambdas)
            std_bound_min_lambdas[idx][lags] = np.std(bound_min_lambdas)
            std_bound_min_lambdas_without_delta_ineq[idx][lags] = np.std(
                bound_min_lambdas_without_delta_ineq
            )
            std_ihs_min_lambdas[idx][lags] = np.std(ihs_min_lambdas)
            std_ihs_min_lambdas_without_delta[idx][lags] = np.std(
                ihs_min_lambdas_without_delta
            )
            std_increases[idx][lags] = np.std(increases)
    return (
        (
            pd.Series(np.mean(mean_alphas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.mean(std_alphas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(np.mean(mean_deltas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.mean(std_deltas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_delta_over_one_pl_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_delta_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_one_over_one_pl_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_one_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_eig_eye_pl_deltas, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_min_eig_eye_pl_deltas, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_min_lmb_pl_1mlmb_alphas, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[
                    2:
                ],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[
                    2:
                ],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.mean(std_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)
            ),
            pd.Series(np.mean(std_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_bound_min_lambdas_without_delta_ineq, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_bound_min_lambdas_without_delta_ineq, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.mean(std_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_ihs_min_lambdas_without_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.mean(std_ihs_min_lambdas_without_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_increases, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.mean(std_increases, 0)[2:], index=range(2, nlags + 1)),
        ),
    )
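For readability, the per-interval core of mean_of_alphas_and_deltas above can be summarized as: write the IHS autocorrelation matrix as R_IHS = I + α(R_std − I) + Δ, where α is fitted by no-intercept OLS of the row sums of R_IHS − I on those of R_std − I, and Δ is the residual matrix; all the remaining quantities are norms and extreme eigenvalues of these pieces. A minimal sketch of that single step (the helper name alpha_delta_stats is illustrative):

import numpy as np
from statsmodels.regression.linear_model import OLS

def alpha_delta_stats(stn_mcov, ihs_mcov):
    """One interval, one matrix size h: the quantities averaged in mean_of_alphas_and_deltas."""
    h = stn_mcov.shape[0]
    eye = np.eye(h)
    x = np.sum(stn_mcov - eye, axis=1)
    y = np.sum(ihs_mcov - eye, axis=1)
    alpha = OLS(y, x).fit().params[0]                     # scaling between the off-diagonal parts
    delta = ihs_mcov - eye - alpha * (stn_mcov - eye)     # residual matrix Delta_h
    return {
        "alpha": alpha,
        "delta_norm": np.linalg.norm(delta, ord=2),
        "stn_min_lambda": np.min(np.linalg.eigvals(stn_mcov)),
        "ihs_min_lambda": np.min(np.linalg.eigvals(ihs_mcov)),
        "min_eig_eye_plus_delta": np.min(np.linalg.eigvals(eye + delta)),
    }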
In [137]:
nlags = 40
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)


(
    (mean_alphas, std_alphas),
    (mean_deltas, std_deltas),
    (mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
    (mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
    (mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
    (mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
    (
        mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (
        mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (mean_stn_min_lambdas, std_stn_min_lambdas),
    (mean_bound_min_lambdas, std_bound_min_lambdas),
    (
        mean_bound_min_lambdas_without_delta_ineq,
        std_bound_min_lambdas_without_delta_ineq,
    ),
    (mean_ihs_min_lambdas, std_ihs_min_lambdas),
    (mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
    (mean_increases, std_increases),
) = mean_of_alphas_and_deltas(nlags, ts_idx=5)

std_bar_alphas = np.transpose(
    np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
    np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
    np.array(
        [
            mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
            mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
        ]
    )
)
std_bar_one_over_one_pl_delta = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
            mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
            mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
        ]
    )
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
            mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            - std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            + std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            - std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            + std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_stn_min_lambdas = np.transpose(
    np.array(
        [
            mean_stn_min_lambdas - std_stn_min_lambdas,
            mean_stn_min_lambdas + std_stn_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas - std_bound_min_lambdas,
            mean_bound_min_lambdas + std_bound_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas_without_delta_ineq
            - std_bound_min_lambdas_without_delta_ineq,
            mean_bound_min_lambdas_without_delta_ineq
            + std_bound_min_lambdas_without_delta_ineq,
        ]
    )
)
std_bar_ihs_min_lambdas = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas - std_ihs_min_lambdas,
            mean_ihs_min_lambdas + std_ihs_min_lambdas,
        ]
    )
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
            mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
        ]
    )
)
std_bar_increases = np.transpose(
    np.array([mean_increases - std_increases, mean_increases + std_increases])
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [146]:
kwargs = dict(
    xtitle="N (matrix size)",
    calc_xticks=True,
    fill_along_axis=False,
    fill_only_positive=False,
)


fig = plot_stats(
    mean_alphas.values,
    xs=mean_alphas.index.values,
    conf_intvs=std_bar_alphas,
    name="Average alpha",
    color="darkblue",
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

plot_stats(
    mean_deltas.values,
    xs=mean_deltas.index.values,
    conf_intvs=std_bar_deltas,
    name="Average 2-norm of delta",
    color="tab:red",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)


fig = plot_stats(
    mean_delta_over_one_plus_delta.values,
    xs=mean_delta_over_one_plus_delta.index.values,
    conf_intvs=std_bar_delta_over_one_plus_delta,
    name="delta / (1 + delta)",
    color="tab:green",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_stats(
    mean_one_over_one_pl_delta.values,
    xs=mean_one_over_one_pl_delta.index.values,
    conf_intvs=std_bar_one_over_one_pl_delta,
    name="1 / (1 + delta)",
    color="pink",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_stats(
    mean_min_eig_eye_pl_deltas.values,
    xs=mean_min_eig_eye_pl_deltas.index.values,
    conf_intvs=std_bar_min_eig_eye_pl_deltas,
    name="min_eig_(eye_pl_delta)",
    color="tab:cyan",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)

fig = plot_stats(
    mean_min_lmb_pl_1mlmb_alphas.values,
    xs=mean_min_lmb_pl_1mlmb_alphas.index.values,
    conf_intvs=std_bar_min_lmb_pl_1mlmb_alphas,
    name="min_lmb +  (1 - min_lmb) * alphas",
    color="tab:orange",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)

display(fig)


fig = plot_stats(
    mean_stn_min_lambdas.values,
    xs=mean_stn_min_lambdas.index.values,
    conf_intvs=std_bar_stn_min_lambdas,
    name="stn_min_lambdas",
    color="darkblue",
    yscale="log",
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

fig = plot_stats(
    mean_ihs_min_lambdas_without_delta.values,
    xs=mean_ihs_min_lambdas_without_delta.index.values,
    conf_intvs=std_bar_ihs_min_lambdas_without_delta,
    name="ihs_min_lambdas_without_delta",
    color="tab:red",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

fig = plot_stats(
    mean_increases.values,
    xs=mean_increases.index.values,
    conf_intvs=std_bar_increases,
    name="increases",
    color="gray",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_stats(
    mean_bound_min_lambdas.values,
    xs=mean_bound_min_lambdas.index.values,
    conf_intvs=std_bar_bound_min_lambdas,
    name="bound_min_lambdas",
    color="tab:green",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

fig = plot_stats(
    mean_bound_min_lambdas_without_delta_ineq.values,
    xs=mean_bound_min_lambdas_without_delta_ineq.index.values,
    conf_intvs=std_bar_bound_min_lambdas_without_delta_ineq,
    name="bound_min_lambdas_without_delta_ineq",
    color="tab:cyan",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)


fig = plot_stats(
    mean_ihs_min_lambdas.values,
    xs=mean_ihs_min_lambdas.index.values,
    conf_intvs=std_bar_ihs_min_lambdas,
    name="ihs_min_lambdas",
    color="tab:orange",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

display(fig)
In [152]:
nlags = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)


(
    (mean_alphas, std_alphas),
    (mean_deltas, std_deltas),
    (mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
    (mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
    (mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
    (mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
    (
        mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (
        mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (mean_stn_min_lambdas, std_stn_min_lambdas),
    (mean_bound_min_lambdas, std_bound_min_lambdas),
    (
        mean_bound_min_lambdas_without_delta_ineq,
        std_bound_min_lambdas_without_delta_ineq,
    ),
    (mean_ihs_min_lambdas, std_ihs_min_lambdas),
    (mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
    (mean_increases, std_increases),
) = mean_of_alphas_and_deltas(nlags, ts_idx=5)

std_bar_alphas = np.transpose(
    np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
    np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
    np.array(
        [
            mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
            mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
        ]
    )
)
std_bar_one_over_one_pl_delta = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
            mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
            mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
        ]
    )
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
            mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            - std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            + std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            - std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            + std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_stn_min_lambdas = np.transpose(
    np.array(
        [
            mean_stn_min_lambdas - std_stn_min_lambdas,
            mean_stn_min_lambdas + std_stn_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas - std_bound_min_lambdas,
            mean_bound_min_lambdas + std_bound_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas_without_delta_ineq
            - std_bound_min_lambdas_without_delta_ineq,
            mean_bound_min_lambdas_without_delta_ineq
            + std_bound_min_lambdas_without_delta_ineq,
        ]
    )
)
std_bar_ihs_min_lambdas = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas - std_ihs_min_lambdas,
            mean_ihs_min_lambdas + std_ihs_min_lambdas,
        ]
    )
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
            mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
        ]
    )
)
std_bar_increases = np.transpose(
    np.array([mean_increases - std_increases, mean_increases + std_increases])
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [147]:
fig = plot_stats(
    mean_alphas.values,
    xs=mean_alphas.index.values,
    conf_intvs=std_bar_alphas,
#     fill_along_axis=False,
    name="$\\alpha_h$",
    color="darkblue",
    height=300,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

fig = plot_stats(
    mean_deltas.values,
    xs=mean_deltas.index.values,
    conf_intvs=std_bar_deltas,
#     fill_along_axis=False,
    name="$||\Delta_h||_2$",
    color="tab:orange",
    fig=fig,
#     height=300,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
In [148]:
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)

fig = plot_ts(
    mean_min_lmb_pl_1mlmb_alphas,
    #     std=std_bar_min_lmb_pl_1mlmb_alphas,
    name="$\lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
    color="darkblue",
    height=600,
    #     fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_min_eig_eye_pl_deltas,
    #     conf_intvs=std_bar_min_eig_eye_pl_deltas,
    name="$\lambda_{1}(I_h + \Delta_h)$",
    color="tab:orange",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_one_over_one_pl_delta,
    #     conf_intvs=std_bar_one_over_one_pl_delta,
    name="$\dfrac{1}{1 + ||\Delta_h||}$",
    color="tab:green",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.8)

display(fig)
In [80]:
std_bar_min_lmb_pl_1mlmb_alphas
Out[80]:
array([[-0.97185294, -0.94455364],
       [-0.95327237, -0.90647971],
       [-0.94071317, -0.87857914],
       [-0.93138648, -0.85590474],
       [-0.92463737, -0.83739003],
       [-0.92011028, -0.82246194],
       [-0.91757226, -0.81068596],
       [-0.91631659, -0.8007922 ],
       [-0.9159434 , -0.79223595],
       [-0.91606501, -0.78442037],
       [-0.91641783, -0.7768644 ],
       [-0.91681258, -0.76905586],
       [-0.91729875, -0.76090871],
       [-0.91798503, -0.75279071],
       [-0.91901569, -0.74517136],
       [-0.92123352, -0.73895841],
       [-0.92550749, -0.73524188],
       [-0.93172264, -0.73655411],
       [-0.93887753, -0.75058411]])
In [154]:
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)

fig = plot_stats(
    mean_min_lmb_pl_1mlmb_alphas.values,
    xs=mean_min_lmb_pl_1mlmb_alphas.index.values,
    conf_intvs=std_bar_min_lmb_pl_1mlmb_alphas,
    fill_along_axis=False,
    name="$\lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
    color="darkblue",
    conf_alpha=0.1,
    height=600,
    #     fig=fig,
    **kwargs
)


fig = plot_stats(
    mean_min_eig_eye_pl_deltas.values,
    xs=mean_min_eig_eye_pl_deltas.index.values,
    conf_intvs=std_bar_min_eig_eye_pl_deltas,
    fill_along_axis=False,
    name="$\lambda_{1}(I_h + \Delta_h)$",
    color="tab:orange",
    conf_alpha=0.1,
    fig=fig,
    **kwargs
)

fig = plot_stats(
    mean_one_over_one_pl_delta.values,
    xs=mean_one_over_one_pl_delta.index.values,
    conf_intvs=std_bar_one_over_one_pl_delta,
    fill_along_axis=False,
    name="$\dfrac{1}{1 + ||\Delta_h||}$",
    color="tab:green",
    conf_alpha=0.1,
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.8)

display(fig)
In [156]:
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)

fig = plot_stats(
    mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.values,
    xs=mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.index.values,
    conf_intvs=std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
    #     fill_along_axis=False,
    name="$\lambda_{1}(I_h + \Delta_h) - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
    color="darkblue",
    #     conf_alpha=0.1,
    height=600,
    #     fig=fig,
    **kwargs
)


fig = plot_stats(
    mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.values,
    xs=mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.index.values,
    conf_intvs=std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
    #     fill_along_axis=False,
    name="$\dfrac{1}{1 + ||\Delta_h||} - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
    color="tab:orange",
    #     conf_alpha=0.1,
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
# ax.set_ylim(bottom=0.8)

display(fig)
In [157]:
kwargs = dict(
    xtitle="h (matrix size)",
    calc_xticks=True,
    major_xstep=1,
    height=500,
)

fig = plot_ts(
    pd.Series(data=0, index=mean_min_lmb_pl_1mlmb_alphas.index),
    #     std=std_bar_min_lmb_pl_1mlmb_alphas,
    name="",
    color="darkblue",
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_min_eig_eye_pl_deltas - mean_min_lmb_pl_1mlmb_alphas,
    #     conf_intvs=std_bar_min_eig_eye_pl_deltas,
    name="$\lambda_{1}(I + \Delta) - \lambda_{1}(R) + \\alpha(1 - \lambda_1(R))$",
    color="tab:orange",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_one_over_one_pl_delta - mean_min_lmb_pl_1mlmb_alphas,
    #     conf_intvs=std_bar_one_over_one_pl_delta,
    name="$\dfrac{1}{1 + ||\Delta||} - \lambda_{1}(R) + \\alpha(1 - \lambda_1(R))$",
    color="tab:green",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
ax.set_ylim(bottom=-0.05, top=0.05)

display(fig)
WARNING:matplotlib.legend:No handles with labels found to put in legend.

Averaged over Intervals and Channels¶

In [163]:
# averaged_mean_of_alphas_and_deltas
def averaged_mean_of_alphas_and_deltas(nlags):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    stn_tss = standard_trans_ts
    ihs_tss = ihs_trans_ts

    mean_alphas = np.zeros((len(stn_tss), nlags + 1))
    mean_deltas = np.zeros((len(stn_tss), nlags + 1))
    mean_delta_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_one_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_min_eig_eye_pl_deltas = np.zeros((len(stn_tss), nlags + 1))
    mean_min_lmb_pl_1mlmb_alphas = np.zeros((len(stn_tss), nlags + 1))
    mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    mean_stn_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_bound_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_bound_min_lambdas_without_delta_ineq = np.zeros((len(stn_tss), nlags + 1))
    mean_ihs_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_ihs_min_lambdas_without_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_increases = np.zeros((len(stn_tss), nlags + 1))

    for idx, (standard_ts, ihs_ts) in enumerate(zip(stn_tss, ihs_tss)):
        standard_mcovs = [None] * len(begins)
        ihs_mcovs = [None] * len(begins)
        for ts, mcovs in zip([standard_ts, ihs_ts], [standard_mcovs, ihs_mcovs]):
            for i, (begin, end) in enumerate(zip(begins, ends)):
                acov = acovf(ts[begin:end], fft=False)[:nlags]
                acov /= acov[0]
                mcovs[i] = cov_matrix(acov)
        for lags in range(2, nlags + 1):
            alphas = []
            deltas = []
            delta_over_one_pl_delta = []
            one_over_one_pl_delta = []
            min_eig_eye_pl_deltas = []
            min_lmb_pl_1mlmb_alphas = []
            one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = []
            min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = []
            stn_min_lambdas = []
            bound_min_lambdas = []
            bound_min_lambdas_without_delta_ineq = []
            ihs_min_lambdas = []
            ihs_min_lambdas_without_delta = []
            increases = []
            for i in range(len(begins)):
                eye = np.eye(lags)
                stn_mcov = standard_mcovs[i][:lags, :lags]
                ihs_mcov = ihs_mcovs[i][:lags, :lags]
                x = np.sum(stn_mcov - eye, axis=1)
                y = np.sum(ihs_mcov - eye, axis=1)
                alpha = OLS(y, x).fit().params[0]
                delta = ihs_mcov - eye - alpha * (stn_mcov - eye)
                delta_norm = np.linalg.norm(delta, ord=2)
                min_lambda = np.min(np.abs(np.linalg.eigvals(stn_mcov)))
                delta_over_1_pl_delta = delta_norm / (1 + delta_norm)
                one_over_1_pl_delta = 1 / (1 + delta_norm)
                increase = (1 - alpha) * (1 - min_lambda)
                min_eig_eye_pl_delta = np.min(np.abs(np.linalg.eigvals(eye + delta)))
                min_lmb_pl_1mlmb_alpha = min_lambda + (1 - min_lambda) * alpha
                alphas.append(alpha)
                deltas.append(delta_norm)
                delta_over_one_pl_delta.append(delta_over_1_pl_delta)
                one_over_one_pl_delta.append(one_over_1_pl_delta)
                min_eig_eye_pl_deltas.append(min_eig_eye_pl_delta)
                min_lmb_pl_1mlmb_alphas.append(min_lmb_pl_1mlmb_alpha)
                one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.append(
                    one_over_1_pl_delta - min_lmb_pl_1mlmb_alpha
                )
                min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.append(
                    min_eig_eye_pl_delta - min_lmb_pl_1mlmb_alpha
                )
                stn_min_lambdas.append(min_lambda)
                bound_min_lambdas.append(min_lambda + increase - delta_over_1_pl_delta)
                bound_min_lambdas_without_delta_ineq.append(
                    alpha * min_lambda - alpha + min_eig_eye_pl_delta
                )
                ihs_min_lambdas.append(np.min(np.linalg.eigvals(ihs_mcov)))
                ihs_min_lambdas_without_delta.append(
                    np.min(np.linalg.eigvals(eye + alpha * (stn_mcov - eye)))
                )
                increases.append(increase)
            mean_alphas[idx][lags] = np.mean(alphas)
            mean_deltas[idx][lags] = np.mean(deltas)
            mean_delta_over_one_pl_delta[idx][lags] = np.mean(delta_over_one_pl_delta)
            mean_one_over_one_pl_delta[idx][lags] = np.mean(one_over_one_pl_delta)
            mean_min_eig_eye_pl_deltas[idx][lags] = np.mean(min_eig_eye_pl_deltas)
            mean_min_lmb_pl_1mlmb_alphas[idx][lags] = np.mean(min_lmb_pl_1mlmb_alphas)
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas[idx][
                lags
            ] = np.mean(one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas)
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas[idx][
                lags
            ] = np.mean(min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas)
            mean_stn_min_lambdas[idx][lags] = np.mean(stn_min_lambdas)
            mean_bound_min_lambdas[idx][lags] = np.mean(bound_min_lambdas)
            mean_bound_min_lambdas_without_delta_ineq[idx][lags] = np.mean(
                bound_min_lambdas_without_delta_ineq
            )
            mean_ihs_min_lambdas[idx][lags] = np.mean(ihs_min_lambdas)
            mean_ihs_min_lambdas_without_delta[idx][lags] = np.mean(
                ihs_min_lambdas_without_delta
            )
            mean_increases[idx][lags] = np.mean(increases)
    return (
        (
            pd.Series(np.mean(mean_alphas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_alphas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(np.mean(mean_deltas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_deltas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_delta_over_one_pl_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_delta_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_one_over_one_pl_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_one_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_eig_eye_pl_deltas, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_min_eig_eye_pl_deltas, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_min_lmb_pl_1mlmb_alphas, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[
                    2:
                ],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[
                    2:
                ],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)
            ),
            pd.Series(np.std(mean_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_bound_min_lambdas_without_delta_ineq, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_bound_min_lambdas_without_delta_ineq, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_ihs_min_lambdas_without_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_ihs_min_lambdas_without_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_increases, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_increases, 0)[2:], index=range(2, nlags + 1)),
        ),
    )
In [168]:
nlags = 40
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)


(
    (mean_alphas, std_alphas),
    (mean_deltas, std_deltas),
    (mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
    (mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
    (mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
    (mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
    (
        mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (
        mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (mean_stn_min_lambdas, std_stn_min_lambdas),
    (mean_bound_min_lambdas, std_bound_min_lambdas),
    (
        mean_bound_min_lambdas_without_delta_ineq,
        std_bound_min_lambdas_without_delta_ineq,
    ),
    (mean_ihs_min_lambdas, std_ihs_min_lambdas),
    (mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
    (mean_increases, std_increases),
) = averaged_mean_of_alphas_and_deltas(nlags)

std_bar_alphas = np.transpose(
    np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
    np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
    np.array(
        [
            mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
            mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
        ]
    )
)
std_bar_one_over_one_pl_delta = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
            mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
            mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
        ]
    )
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
            mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            - std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            + std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            - std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            + std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_stn_min_lambdas = np.transpose(
    np.array(
        [
            mean_stn_min_lambdas - std_stn_min_lambdas,
            mean_stn_min_lambdas + std_stn_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas - std_bound_min_lambdas,
            mean_bound_min_lambdas + std_bound_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas_without_delta_ineq
            - std_bound_min_lambdas_without_delta_ineq,
            mean_bound_min_lambdas_without_delta_ineq
            + std_bound_min_lambdas_without_delta_ineq,
        ]
    )
)
std_bar_ihs_min_lambdas = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas - std_ihs_min_lambdas,
            mean_ihs_min_lambdas + std_ihs_min_lambdas,
        ]
    )
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
            mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
        ]
    )
)
std_bar_increases = np.transpose(
    np.array([mean_increases - std_increases, mean_increases + std_increases])
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [169]:
kwargs = dict(
    xtitle="N (matrix size)",
    height=600,
    calc_xticks=True,
    fill_along_axis=False,
    fill_only_positive=False,
)


fig = plot_stats(
    mean_alphas.values,
    xs=mean_alphas.index.values,
    conf_intvs=std_bar_alphas,
    name="Average alpha",
    color="darkblue",
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

plot_stats(
    mean_deltas.values,
    xs=mean_deltas.index.values,
    conf_intvs=std_bar_deltas,
    name="Average 2-norm of delta",
    color="tab:red",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)


fig = plot_stats(
    mean_delta_over_one_plus_delta.values,
    xs=mean_delta_over_one_plus_delta.index.values,
    conf_intvs=std_bar_delta_over_one_plus_delta,
    name="delta / (1 + delta)",
    color="tab:green",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_stats(
    mean_one_over_one_pl_delta.values,
    xs=mean_one_over_one_pl_delta.index.values,
    conf_intvs=std_bar_one_over_one_pl_delta,
    name="1 / (1 + delta)",
    color="pink",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_stats(
    mean_min_eig_eye_pl_deltas.values,
    xs=mean_min_eig_eye_pl_deltas.index.values,
    conf_intvs=std_bar_min_eig_eye_pl_deltas,
    name="min_eig_(eye_pl_delta)",
    color="tab:cyan",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)

fig = plot_stats(
    mean_min_lmb_pl_1mlmb_alphas.values,
    xs=mean_min_lmb_pl_1mlmb_alphas.index.values,
    conf_intvs=std_bar_min_lmb_pl_1mlmb_alphas,
    name="min_lmb +  (1 - min_lmb) * alphas",
    color="tab:orange",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)

display(fig)


fig = plot_stats(
    mean_stn_min_lambdas.values,
    xs=mean_stn_min_lambdas.index.values,
    conf_intvs=std_bar_stn_min_lambdas,
    name="stn_min_lambdas",
    color="darkblue",
    yscale="log",
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

fig = plot_stats(
    mean_ihs_min_lambdas_without_delta.values,
    xs=mean_ihs_min_lambdas_without_delta.index.values,
    conf_intvs=std_bar_ihs_min_lambdas_without_delta,
    name="ihs_min_lambdas_without_delta",
    color="tab:red",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

fig = plot_stats(
    mean_increases.values,
    xs=mean_increases.index.values,
    conf_intvs=std_bar_increases,
    name="increases",
    color="gray",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_stats(
    mean_bound_min_lambdas.values,
    xs=mean_bound_min_lambdas.index.values,
    conf_intvs=std_bar_bound_min_lambdas,
    name="bound_min_lambdas",
    color="tab:green",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

fig = plot_stats(
    mean_bound_min_lambdas_without_delta_ineq.values,
    xs=mean_bound_min_lambdas_without_delta_ineq.index.values,
    conf_intvs=std_bar_bound_min_lambdas_without_delta_ineq,
    name="bound_min_lambdas_without_delta_ineq",
    color="tab:cyan",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)


fig = plot_stats(
    mean_ihs_min_lambdas.values,
    xs=mean_ihs_min_lambdas.index.values,
    conf_intvs=std_bar_ihs_min_lambdas,
    name="ihs_min_lambdas",
    color="tab:orange",
    yscale="log",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

display(fig)
In [164]:
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)

fig = plot_ts(
    mean_min_lmb_pl_1mlmb_alphas,
    #     std=std_bar_min_lmb_pl_1mlmb_alphas,
    name="$\lambda_{1}(R) + \\alpha(1 - \lambda_1(R))$",
    color="darkblue",
    height=800,
    #     fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_min_eig_eye_pl_deltas,
    #     conf_intvs=std_bar_min_eig_eye_pl_deltas,
    name="$\lambda_{1}(I + \Delta)$",
    color="tab:orange",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_one_over_one_pl_delta,
    #     conf_intvs=std_bar_one_over_one_pl_delta,
    name="$\dfrac{1}{1 + ||\Delta||}$",
    color="tab:green",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)

display(fig)
In [35]:
nlags = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)


(
    (mean_alphas, std_alphas),
    (mean_deltas, std_deltas),
    (mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
    (mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
    (mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
    (mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
    (mean_stn_min_lambdas, std_stn_min_lambdas),
    (mean_bound_min_lambdas, std_bound_min_lambdas),
    (
        mean_bound_min_lambdas_without_delta_ineq,
        std_bound_min_lambdas_without_delta_ineq,
    ),
    (mean_ihs_min_lambdas, std_ihs_min_lambdas),
    (mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
    (mean_increases, std_increases),
) = averaged_mean_of_alphas_and_deltas(nlags)
In [164]:
nlags = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)


(
    (mean_alphas, std_alphas),
    (mean_deltas, std_deltas),
    (mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
    (mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
    (mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
    (mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
    (
        mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (
        mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (mean_stn_min_lambdas, std_stn_min_lambdas),
    (mean_bound_min_lambdas, std_bound_min_lambdas),
    (
        mean_bound_min_lambdas_without_delta_ineq,
        std_bound_min_lambdas_without_delta_ineq,
    ),
    (mean_ihs_min_lambdas, std_ihs_min_lambdas),
    (mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
    (mean_increases, std_increases),
) = averaged_mean_of_alphas_and_deltas(nlags)

std_bar_alphas = np.transpose(
    np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
    np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
    np.array(
        [
            mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
            mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
        ]
    )
)
std_bar_one_over_one_pl_delta = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
            mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
            mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
        ]
    )
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
            mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            - std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
            + std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
    np.array(
        [
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            - std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
            + std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        ]
    )
)
std_bar_stn_min_lambdas = np.transpose(
    np.array(
        [
            mean_stn_min_lambdas - std_stn_min_lambdas,
            mean_stn_min_lambdas + std_stn_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas - std_bound_min_lambdas,
            mean_bound_min_lambdas + std_bound_min_lambdas,
        ]
    )
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
    np.array(
        [
            mean_bound_min_lambdas_without_delta_ineq
            - std_bound_min_lambdas_without_delta_ineq,
            mean_bound_min_lambdas_without_delta_ineq
            + std_bound_min_lambdas_without_delta_ineq,
        ]
    )
)
std_bar_ihs_min_lambdas = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas - std_ihs_min_lambdas,
            mean_ihs_min_lambdas + std_ihs_min_lambdas,
        ]
    )
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
    np.array(
        [
            mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
            mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
        ]
    )
)
std_bar_increases = np.transpose(
    np.array([mean_increases - std_increases, mean_increases + std_increases])
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
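Each `std_bar_*` band above repeats the same construction: stack mean − std and mean + std and transpose to the (len, 2) shape that `plot_stats` expects in `conf_intvs`. A small helper (hypothetical, not defined in this notebook) would express the pattern once:

def std_band(mean, std):
    # rows (mean - std, mean + std), transposed to shape (len(mean), 2)
    # to match the conf_intvs argument of plot_stats used below
    return np.transpose(np.array([mean - std, mean + std]))

# e.g. std_bar_alphas = std_band(mean_alphas, std_alphas)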
In [165]:
kwargs = dict(
    xtitle="h (matrix size)",
    calc_xticks=True,
    major_xstep=1,
    height=300,
)

fig = plot_ts(
    mean_min_lmb_pl_1mlmb_alphas,
    #     std=std_bar_min_lmb_pl_1mlmb_alphas,
    name="$\lambda_{1}(R_h) + \\alpha(1 - \lambda_1(R_h))$",
    color="darkblue",
    #     height=800,
    #     fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_min_eig_eye_pl_deltas,
    #     conf_intvs=std_bar_min_eig_eye_pl_deltas,
    name="$\lambda_{1}(I_h + \Delta_h)$",
    color="tab:orange",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)


fig = plot_ts(
    mean_one_over_one_pl_delta,
    #     conf_intvs=std_bar_one_over_one_pl_delta,
    name="$\dfrac{1}{1 + ||\Delta_h||}$",
    color="tab:green",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)

display(fig)
In [166]:
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)

fig = plot_stats(
    mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.values,
    xs=mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.index.values,
    conf_intvs=std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
    #     fill_along_axis=False,
    name="$\lambda_{1}(I_h + \Delta_h) - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
    color="darkblue",
    #     conf_alpha=0.1,
    height=300,
    #     fig=fig,
    **kwargs
)


fig = plot_stats(
    mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.values,
    xs=mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.index.values,
    conf_intvs=std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
    #     fill_along_axis=False,
    name="$\dfrac{1}{1 + ||\Delta_h||} - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
    color="tab:orange",
    #     conf_alpha=0.1,
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
# ax.set_ylim(bottom=0.8)

display(fig)
In [162]:
kwargs = dict(
    xtitle="h (matrix size)",
    height=300,
    calc_xticks=True,
    fill_along_axis=False,
    fill_only_positive=False,
)


fig = plot_stats(
    mean_alphas.values,
    xs=mean_alphas.index.values,
    conf_intvs=std_bar_alphas,
    name="$\\alpha_h$",
    color="darkblue",
    **kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)

plot_stats(
    mean_deltas.values,
    xs=mean_deltas.index.values,
    conf_intvs=std_bar_deltas,
    name="$||\Delta||_2$",
    color="tab:orange",
    fig=fig,
    **kwargs
)
ax_settings(fig=fig, showgrid=True)

display(fig)

Eigenvalues of Autocorrelation Matrices and Circulant Matrices¶

In [178]:
# autocorr, circ_matrix, dft, eigenvalues and their bounds
def autocorr(ts, nlags):
    acv = acovf(ts, fft=False, nlag=nlags)
    acv /= acv[0]
    return acv


def autocorr_matrix(ts, nlags):
    return cov_matrix(autocorr(ts, nlags))


def circ_matrix(first_row):
    n = len(first_row)
    mat = np.zeros((n, n))
    for i in range(n):
        if i > 0:
            mat[i, :i] = first_row[-i:]
        mat[i, i:] = first_row[: n - i]
    return mat


def circ_matrix_from_autocorr(
    acf,
    alphas=None,
    m=None,
    len_pow_of_2=False,
):
    n = len(acf)
    if m is None:
        if alphas is not None:
            m = 2 * n + len(alphas) - 1
        elif len_pow_of_2:
            m = 1 << n.bit_length()
        else:
            m = 2 * n
    first_row = np.zeros(m)
    first_row[:n] = acf
    if alphas is not None:
        first_row[n : m - n + 1] = alphas
    first_row[m - n + 1 :] = np.flipud(acf[1:])
    return circ_matrix(first_row)


def dft(x):
    """
    Function to calculate the
    discrete Fourier Transform
    of a 1D real-valued signal x
    """
    N = len(x)
    n = np.arange(N)
    k = n.reshape((N, 1))
    e = np.exp(-2j * np.pi * k * n / N)
    X = np.dot(e, x)
    return X


def dft_matrix(n):
    X = np.zeros((n, n), dtype=complex)
    for j in range(n):
        for k in range(n):
            X[j, k] = np.exp((2j * np.pi / n) * j * k)
    return X


def sorted_eig_values(matrix):
    return np.sort(np.real(np.linalg.eigvals(matrix)))

Ferreira's Bounds on Each Eigenvalue via Embedding in a Circulant Matrix¶

In [179]:
# https://doi.org/10.1016/S1474-6670(17)47877-3
# Ferreira's bounds on each eigenvalue of autocorrelation matrix
# based on circulant embedding
def ferreira_bounds_of_autocorr_eigvals(acf):
    c = circ_matrix_from_autocorr(acf)[0, :]
    n = len(c)
    #     print(n)
    X = dft_matrix(n)
    c_eig_vals = np.real(X @ c)
    #     print(c_eig_vals)
    n = len(c_eig_vals) // 2
    odd_c_eig_vals = np.array(sorted([c_eig_vals[2 * i + 1] for i in range(n)]))
    even_c_eig_vals = np.array(sorted([c_eig_vals[2 * i] for i in range(n)]))
    bounds = np.zeros((2, n))
    for k in range(n):
        # lower
        bounds[0, k] = 0.5 * (even_c_eig_vals[k] + odd_c_eig_vals[0])
        # upper
        bounds[1, k] = 0.5 * (even_c_eig_vals[k] + odd_c_eig_vals[n - 1])
    return bounds
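Reading the function above directly: with $C$ the $2n \times 2n$ circulant embedding of the length-$n$ autocorrelation sequence, $\mu_{(1)} \le \dots \le \mu_{(n)}$ its circulant eigenvalues at even DFT indices and $\nu_{(1)} \le \dots \le \nu_{(n)}$ those at odd DFT indices (both sorted ascending), the returned bracket for the $k$-th smallest eigenvalue of the $n \times n$ autocorrelation matrix $R$ is

$$\tfrac{1}{2}\big(\mu_{(k)} + \nu_{(1)}\big) \;\le\; \lambda_k(R) \;\le\; \tfrac{1}{2}\big(\mu_{(k)} + \nu_{(n)}\big).$$

The playground cell below cross-checks this even/odd split against the eigenvalues of $T + S$ and $T - S$ built from the upper blocks of $C$.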
In [172]:
# ferreira's playground
acf_values = autocorr(ihs_trans_ts[5], nlags=16)
C = circ_matrix_from_autocorr(acf_values)
c = C[0, :]
print(c)
n = len(c)
print(n)
X = dft_matrix(n)
c_eig_vals = np.real(X @ c)
n = len(c_eig_vals) // 2
odd_eig_vals = np.array(sorted([c_eig_vals[2 * i + 1] for i in range(n)]))
even_eig_vals = np.array(sorted([c_eig_vals[2 * i] for i in range(n)]))

fig = plot_ts(
    odd_eig_vals,
    linestyle="",
    marker="o",
    xmargin=0.02,
)
display(fig)

fig = plot_ts(
    even_eig_vals,
    linestyle="",
    marker="o",
    xmargin=0.02,
)
display(fig)

print(f"bounds: {ferreira_bounds_of_autocorr_eigvals(acf_values)}")

T = C[:n, :n]
S = C[:n, n:]

# print(T)
# print(S)

TpS = T + S

TmS = T - S

print(f"eigvals of T + S: {sorted_eig_values(TpS)}")

print(f"eigvals of T - S: {sorted_eig_values(TmS)}")
[ 1.          0.7991255   0.48304211  0.30220353  0.14250526 -0.0055492
 -0.0920938  -0.19117584 -0.29003769 -0.34677347 -0.39096839 -0.39945643
 -0.37241226 -0.36264212 -0.35268309 -0.31996008 -0.27113906  0.
 -0.27113906 -0.31996008 -0.35268309 -0.36264212 -0.37241226 -0.39945643
 -0.39096839 -0.34677347 -0.29003769 -0.19117584 -0.0920938  -0.0055492
  0.14250526  0.30220353  0.48304211  0.7991255 ]
34
bounds: [[-1.29509850e+00 -4.55128457e-03 -4.55128457e-03 -4.86274842e-04
  -4.86274842e-04  4.04445261e-02  4.04445261e-02  1.21445309e-01
   1.21445309e-01  1.99534272e-01  1.99534272e-01  3.69885267e-01
   3.69885267e-01  6.27535570e-01  6.27535570e-01  2.46353247e+00
   2.46353247e+00]
 [ 2.48090617e+00  3.77145338e+00  3.77145338e+00  3.77551839e+00
   3.77551839e+00  3.81644919e+00  3.81644919e+00  3.89744998e+00
   3.89744998e+00  3.97553894e+00  3.97553894e+00  4.14588993e+00
   4.14588993e+00  4.40354024e+00  4.40354024e+00  6.23953714e+00
   6.23953714e+00]]
eigvals of T + S: [-2.33603008  0.24506435  0.24506435  0.25319437  0.25319437  0.33505597
  0.33505597  0.49705753  0.49705753  0.65323546  0.65323546  0.99393745
  0.99393745  1.50923806  1.50923806  5.18123186  5.18123186]
eigvals of T - S: [-2.54166915e-01 -2.54166915e-01 -2.41196812e-01 -2.41196812e-01
 -2.39117635e-01 -1.33186735e-02 -1.33186735e-02 -6.93449535e-04
 -6.93449535e-04  3.96930240e-02  3.96930240e-02  6.00103728e-01
  6.00103728e-01  1.19129550e+00  1.19129550e+00  7.29784242e+00
  7.29784242e+00]
In [34]:
C = circ_matrix_from_autocorr([1, 0.6, 0.4, -0.1, -0.2, -0.4])
print(C)
sorted_eig_values(C)
[[ 1.   0.6  0.4 -0.1 -0.2 -0.4  0.  -0.4 -0.2 -0.1  0.4  0.6]
 [ 0.6  1.   0.6  0.4 -0.1 -0.2 -0.4  0.  -0.4 -0.2 -0.1  0.4]
 [ 0.4  0.6  1.   0.6  0.4 -0.1 -0.2 -0.4  0.  -0.4 -0.2 -0.1]
 [-0.1  0.4  0.6  1.   0.6  0.4 -0.1 -0.2 -0.4  0.  -0.4 -0.2]
 [-0.2 -0.1  0.4  0.6  1.   0.6  0.4 -0.1 -0.2 -0.4  0.  -0.4]
 [-0.4 -0.2 -0.1  0.4  0.6  1.   0.6  0.4 -0.1 -0.2 -0.4  0. ]
 [ 0.  -0.4 -0.2 -0.1  0.4  0.6  1.   0.6  0.4 -0.1 -0.2 -0.4]
 [-0.4  0.  -0.4 -0.2 -0.1  0.4  0.6  1.   0.6  0.4 -0.1 -0.2]
 [-0.2 -0.4  0.  -0.4 -0.2 -0.1  0.4  0.6  1.   0.6  0.4 -0.1]
 [-0.1 -0.2 -0.4  0.  -0.4 -0.2 -0.1  0.4  0.6  1.   0.6  0.4]
 [ 0.4 -0.1 -0.2 -0.4  0.  -0.4 -0.2 -0.1  0.4  0.6  1.   0.6]
 [ 0.6  0.4 -0.1 -0.2 -0.4  0.  -0.4 -0.2 -0.1  0.4  0.6  1. ]]
Out[34]:
array([-0.2       , -0.2       , -0.13205081, -0.13205081,  0.4       ,
        0.4       ,  1.2       ,  1.2       ,  1.2       ,  1.6       ,
        3.33205081,  3.33205081])
In [35]:
x = np.zeros(2 * n - 3)
x[: n - 1] = acf[:-1][::-1]
x[n - 2 :] = acf[:-1]
print(x)
C = circ_matrix_from_autocorr(acf, alphas=x)
print(np.min(sorted_eig_values(C)))
print(C.shape)
(C == np.transpose(C)).all()
[-0.0920938  -0.0055492   0.14250526  0.30220353  0.48304211  0.7991255
  1.          0.7991255   0.48304211  0.30220353  0.14250526 -0.0055492
 -0.0920938 ]
-0.1353861192147266
(28, 28)
Out[35]:
True
In [149]:
# circ_matrix([1, 2, 3, 4])
In [155]:
# plot_eigenvalues_ferreira_stats
def plot_eigenvalues_ferreira_stats(ts_idx, nlags):
    n = nlags + 1
    ts = standard_trans_ts[ts_idx]
    standard_acf = autocorr(ts, nlags=nlags)
    standard_eig_vals = sorted_eig_values(cov_matrix(standard_acf))

    ts = ihs_trans_ts[ts_idx]
    ihs_acf = autocorr(ts, nlags=nlags)
    ihs_eig_vals = sorted_eig_values(cov_matrix(ihs_acf))

    fig = plot_stats(standard_acf, title="Autocorrelation", label="standard")
    plot_stats(ihs_acf, color="tab:red", label="IHS", showgrid=True, fig=fig)
    display(fig)

    kwargs = dict(
        title="Ferreira's Bounds on Eigenvalues of Autocorrelation Matrix",
        subtitle="Autocorrelation matrix of size n x n",
        xtitle="kth eigen value",
        xmargin=0.02,
        yscale="log",
    )

    fig = None
    for eig_vals, acf, ts_type, color in [
        (standard_eig_vals, standard_acf, "standard", "tab:blue"),
        (ihs_eig_vals, ihs_acf, "IHS", "tab:red"),
    ]:
        fig = plot_ts(
            eig_vals,
            color=color,
            linestyle="",
            marker="o",
            name=ts_type,
            showgrid=True,
            **kwargs,
        )
        kwargs = dict(fig=fig)
        ax = fig.get_axes()[0]

        yerr = ferreira_bounds_of_autocorr_eigvals(acf)
        print(f"bounds: {yerr}")
        y = yerr[0, :].copy()
        yerr[1, :] -= y
        yerr[0, :] -= y
        x = np.arange(len(y))
        ax.errorbar(
            x, y, yerr, fmt="none", color=color, linewidth=2.5, capsize=10, alpha=0.5
        )

    display(fig)
In [156]:
ts_idx = 5
nlags = 7
plot_eigenvalues_ferreira_stats(ts_idx, nlags)
bounds: [[4.40531913e-04 3.78405160e-03 3.78405160e-03 7.02092971e-02
  7.02092971e-02 2.48216710e-01 2.48216710e-01 2.16059070e+00]
 [2.25299810e+00 2.25634162e+00 2.25634162e+00 2.32276687e+00
  2.32276687e+00 2.50077428e+00 2.50077428e+00 4.41314827e+00]]
bounds: [[2.84106392e-03 1.13671887e-02 1.13671887e-02 1.25548433e-01
  1.25548433e-01 4.51605266e-01 4.51605266e-01 1.81204904e+00]
 [2.06822310e+00 2.07674922e+00 2.07674922e+00 2.19093047e+00
  2.19093047e+00 2.51698730e+00 2.51698730e+00 3.87743108e+00]]
In [167]:
ts_idx = 5
nlags = 16
plot_eigenvalues_ferreira_stats(ts_idx, nlags)
bounds: [[-1.68058691e+00 -7.74974497e-03 -7.74974497e-03 -4.50730496e-03
  -4.50730496e-03  2.80191150e-02  2.80191150e-02  8.60565311e-02
   8.60565311e-02  1.45021888e-01  1.45021888e-01  2.95085346e-01
   2.95085346e-01  5.32213157e-01  5.32213157e-01  2.85081252e+00
   2.85081252e+00]
 [ 2.80854666e+00  4.48138383e+00  4.48138383e+00  4.48462627e+00
   4.48462627e+00  4.51715269e+00  4.51715269e+00  4.57519010e+00
   4.57519010e+00  4.63415546e+00  4.63415546e+00  4.78421892e+00
   4.78421892e+00  5.02134673e+00  5.02134673e+00  7.33994609e+00
   7.33994609e+00]]
bounds: [[-1.29509850e+00 -4.55128457e-03 -4.55128457e-03 -4.86274842e-04
  -4.86274842e-04  4.04445261e-02  4.04445261e-02  1.21445309e-01
   1.21445309e-01  1.99534272e-01  1.99534272e-01  3.69885267e-01
   3.69885267e-01  6.27535570e-01  6.27535570e-01  2.46353247e+00
   2.46353247e+00]
 [ 2.48090617e+00  3.77145338e+00  3.77145338e+00  3.77551839e+00
   3.77551839e+00  3.81644919e+00  3.81644919e+00  3.89744998e+00
   3.89744998e+00  3.97553894e+00  3.97553894e+00  4.14588993e+00
   4.14588993e+00  4.40354024e+00  4.40354024e+00  6.23953714e+00
   6.23953714e+00]]

Bounds on Extreme Eigenvalues of Autocorrelation Matrices¶

In [154]:
# plot_extr_eigenvalues_bounds
def plot_extr_eigenvalues_bounds(
    ts_idx, nlags, name, bounds_fun, plot_autocorr=False, print_bounds=False
):
    ts = standard_trans_ts[ts_idx]
    standard_acf = autocorr(ts, nlags=nlags)
    ts = ihs_trans_ts[ts_idx]
    ihs_acf = autocorr(ts, nlags=nlags)

    if plot_autocorr:
        fig = plot_stats(standard_acf, title="Autocorrelation", label="standard")
        plot_stats(ihs_acf, color="tab:red", label="IHS", showgrid=True, fig=fig)
        display(fig)

    bounds_of_max = {}
    bounds_of_min = {}
    for acf, ts_type in [
        (standard_acf, "standard"),
        (ihs_acf, "IHS"),
    ]:
        bmax, bmin = bounds_fun(acf, nlags)
        bounds_of_max[ts_type] = bmax
        bounds_of_min[ts_type] = bmin

    min_max_etr = "Extreme"
    if bmax is None:
        min_max_etr = "Minimal"
    if bmin is None:
        min_max_etr = "Maximal"

    fig = None
    kwargs = dict(
        title=f"{name}'s Bounds on {min_max_etr} Eigenvalues of Autocorrelation Matrix",
        calc_xticks=True,
        major_xstep=np.ceil(nlags / 30),
        xmargin=0.02,
        yscale="log",
        xtitle="lags (matrix of size: lags * lags)",
    )
    for acf, ts_type, color in [
        (standard_acf, "standard", "tab:blue"),
        (ihs_acf, "IHS", "tab:red"),
    ]:
        min_eigvals = np.zeros(nlags)
        max_eigvals = np.zeros(nlags)
        for i, lags in enumerate(range(1, nlags + 1)):
            #             print(f"acf: {acf[:lags]}")
            eigvals = sorted_eig_values(cov_matrix(acf[:lags]))
            min_eigvals[i] = np.min(eigvals)
            max_eigvals[i] = np.max(eigvals)
        #         print(f"min_eigvals: {min_eigvals}")
        #         print(f"max_eigvals: {max_eigvals}")

        xs = np.arange(1, nlags + 1)

        if bmin is not None:
            fig = plot_ts(
                min_eigvals,
                index_values=xs,
                color=color,
                linestyle="",
                marker="o",
                name=ts_type,
                showgrid=True,
                **kwargs,
            )
            kwargs = dict(fig=fig)
        if bmax is not None:
            fig = plot_ts(
                max_eigvals,
                index_values=xs,
                color=color,
                linestyle="",
                marker="o",
                showgrid=True,
                **kwargs,
            )
            kwargs = dict(fig=fig)

        ax = fig.get_axes()[0]
        bmax = bounds_of_max[ts_type]
        bmin = bounds_of_min[ts_type]
        if print_bounds:
            print(f"bounds_of_max: {bmax}")
            print(f"bounds_of_min: {bmin}")
        for bounds in [bmax, bmin]:
            if bounds is not None:
                yerr = bounds.copy()
                y = yerr[0, :].copy()
                yerr[1, :] -= y
                yerr[0, :] -= y
                ax.errorbar(
                    xs,
                    y,
                    yerr,
                    fmt="none",
                    color=color,
                    linewidth=2.5,
                    capsize=10,
                    alpha=0.5,
                )

    display(fig)

Dembo's Bounds on Extreme Eigenvalues¶

In [65]:
# Dembo's bounds on extreme eigenvalues of autocorrelation matrices
# of size p x p for p = 1, 2, ..., max_nlags
# https://doi.org/10.1109/18.2651
def dembo_bounds_of_autocorr_extr_eigvals(acf, max_nlags):
    c = acf[0]
    bounds_of_max = np.full((2, max_nlags), fill_value=c)
    bounds_of_min = np.full((2, max_nlags), fill_value=c)
    for p in range(2, max_nlags + 1):
        i = p - 1
        eta_1 = bounds_of_min[0, i - 1]
        eta_pm1 = bounds_of_max[1, i - 1]
        b = acf[1:p]

        b_dot_b = np.dot(b, b)
        sq_c_m_eta1_div4_bdotb = np.sqrt(((c - eta_1) ** 2) / 4 + b_dot_b)
        sq_c_m_etapm1_div4_bdotb = np.sqrt(((c - eta_pm1) ** 2) / 4 + b_dot_b)
        c_p_eta1_div2 = (c + eta_1) / 2
        c_p_etapm1_div2 = (c + eta_pm1) / 2

        # lower
        bounds_of_max[0, i] = c_p_eta1_div2 + sq_c_m_eta1_div4_bdotb
        # upper
        bounds_of_max[1, i] = c_p_etapm1_div2 + sq_c_m_etapm1_div4_bdotb

        # lower
        bounds_of_min[0, i] = c_p_eta1_div2 - sq_c_m_eta1_div4_bdotb
        # upper
        bounds_of_min[1, i] = c_p_etapm1_div2 - sq_c_m_etapm1_div4_bdotb

    return bounds_of_max, bounds_of_min
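For reference, the recursion implemented above reads as follows, with $c = r_0$, $b = (r_1, \dots, r_{p-1})^\top$, $\eta^{-}$ the previously computed lower bound on $\lambda_{\min}(R_{p-1})$, and $\eta^{+}$ the previously computed upper bound on $\lambda_{\max}(R_{p-1})$:

$$\frac{c + \eta^{-}}{2} - \sqrt{\frac{(c - \eta^{-})^2}{4} + b^\top b} \;\le\; \lambda_{\min}(R_p), \qquad \lambda_{\max}(R_p) \;\le\; \frac{c + \eta^{+}}{2} + \sqrt{\frac{(c - \eta^{+})^2}{4} + b^\top b}.$$

The code also stores the two cross combinations ($\eta^{+}$ with the minus sign and $\eta^{-}$ with the plus sign) as the upper bound on $\lambda_{\min}(R_p)$ and the lower bound on $\lambda_{\max}(R_p)$, respectively.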
In [66]:
ts_idx = 5
nlags = 15
plot_extr_eigenvalues_bounds(
    ts_idx, nlags, "Dembo", dembo_bounds_of_autocorr_extr_eigvals, plot_autocorr=True
)

Ma and Zarowski's Bounds on Minimal Eigenvalue¶

In [225]:
# Ma and Zarowski's bounds on minimal eigenvalues of autocorrelation matrices
# of size p x p for p = 1, 2, ..., max_nlags
# https://doi.org/10.1109/TIT.2008.928966
def ma_and_zarowski_bounds_of_autocorr_min_eigvals(acf, max_nlags):
    t0 = acf[0]
    bounds_of_max = None
    bounds_of_min = np.full((2, max_nlags), fill_value=t0)
    bounds_of_min[1, 0] += 1e-5
    for p in range(2, max_nlags + 1):
        i = p - 1
        R_nm1 = cov_matrix(acf[: p - 1])
        R_n = cov_matrix(acf[:p])
        eta_1 = bounds_of_min[0, i - 1]
        eta_pm1 = bounds_of_min[1, i - 1]
        t0_plus_eta1 = t0 + eta_1
        t0_plus_etapm1 = t0 + eta_pm1
        d1 = np.linalg.det(R_n) / np.linalg.det(R_nm1)

        #         R_nm1_inv1 = np.linalg.inv(cov_matrix(acf[: p - 1]))
        #         b = np.flipud(acf[1:p])
        #         d1 = t0 - b @ R_nm1_inv1 @ b

        sq_t0_plus_eta1_m_4_d1_eta1 = np.sqrt((t0_plus_eta1 ** 2) - 4 * d1 * eta_1)
        sq_t0_plus_etapm1_m_4_d1_etapm1 = np.sqrt(
            (t0_plus_etapm1 ** 2) - 4 * d1 * eta_pm1
        )

        # lower
        bounds_of_min[0, i] = 0.5 * (t0_plus_eta1 - sq_t0_plus_eta1_m_4_d1_eta1)
        # upper
        bounds_of_min[1, i] = 0.5 * (t0_plus_etapm1 + sq_t0_plus_etapm1_m_4_d1_etapm1)

    return bounds_of_max, bounds_of_min
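Both branches computed above are the two roots of one quadratic, with $t_0 = r_0$, $d_1 = \det(R_p)/\det(R_{p-1})$, and $\eta$ the bound carried over from the $(p-1) \times (p-1)$ matrix:

$$\lambda^2 - (t_0 + \eta)\,\lambda + d_1\,\eta = 0 \quad\Longrightarrow\quad \lambda = \frac{t_0 + \eta \pm \sqrt{(t_0 + \eta)^2 - 4\,d_1\,\eta}}{2},$$

where the minus root evaluated at the previous lower bound gives the new lower bound on $\lambda_{\min}(R_p)$, and the plus root evaluated at the previous upper bound gives the new upper bound.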
In [226]:
ts_idx = 5
nlags = 20
plot_extr_eigenvalues_bounds(
    ts_idx,
    nlags,
    "Ma and Zarowski",
    ma_and_zarowski_bounds_of_autocorr_min_eigvals,
    plot_autocorr=True,
)

Sun's Bounds on Minimal Eigenvalue¶

In [219]:
# # Sun's bounds on minimal eigenvalues of autocorrelation matrices
# # of size p x p for p = 1, 2, ..., max_nlags
# # https://doi.org/10.1109/18.887894
def sun_bounds_of_autocorr_min_eigvals(acf, max_nlags):
    t0 = acf[0]
    bounds_of_max = None
    bounds_of_min = np.full((2, max_nlags), fill_value=t0)

    for p in range(2, max_nlags + 1):
        i = p - 1
        R_nm1_inv1 = np.linalg.inv(cov_matrix(acf[: p - 1]))
        R_nm1_inv2 = R_nm1_inv1 @ R_nm1_inv1
        R_nm1_inv3 = R_nm1_inv2 @ R_nm1_inv1
        eta_1 = bounds_of_min[0, i - 1]
        eta_pm1 = bounds_of_min[1, i - 1]
        #         print(f"eta1={eta_1}, eta_pm1={eta_pm1}")
        b = np.flipud(acf[1:p])
        d1 = t0 - b @ R_nm1_inv1 @ b
        d2 = 1 + b @ R_nm1_inv2 @ b
        d3eta1 = d2 - b @ R_nm1_inv3 @ b * eta_1
        d3etapm1 = d2 - b @ R_nm1_inv3 @ b * eta_pm1

        d1_d2eta1 = d1 + d2 * eta_1
        d1_d2etapm1 = d1 + d2 * eta_pm1

        sq_d1_plus_d2eta1_m_4d1d3eta1 = np.sqrt(
            (d1_d2eta1 ** 2) - 4 * d1 * d3eta1 * eta_1
        )
        sq_d1_plus_d2etapm1_m_4d1d3etapm1 = np.sqrt(
            (d1_d2etapm1 ** 2) - 4 * d1 * d3etapm1 * eta_pm1
        )

        # lower
        bounds_of_min[0, i] = 0.5 * (d1_d2eta1 - sq_d1_plus_d2eta1_m_4d1d3eta1) / d3eta1
        # upper
        bounds_of_min[1, i] = (
            0.5 * (d1_d2etapm1 - sq_d1_plus_d2etapm1_m_4d1d3etapm1) / d3etapm1
        ) + 0.1 * eta_pm1

    return bounds_of_max, bounds_of_min
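Reading the code above directly, with $R = R_{p-1}$, $b$ the (reversed) autocorrelation vector, $d_1 = t_0 - b^\top R^{-1} b$, $d_2 = 1 + b^\top R^{-2} b$, $d_3(\eta) = d_2 - \eta\, b^\top R^{-3} b$, and $\eta$ the bound carried over from the previous size, each branch is the smaller root of

$$d_3(\eta)\,\lambda^2 - (d_1 + d_2\,\eta)\,\lambda + d_1\,\eta = 0, \qquad \lambda = \frac{d_1 + d_2\,\eta - \sqrt{(d_1 + d_2\,\eta)^2 - 4\,d_1\,d_3(\eta)\,\eta}}{2\,d_3(\eta)},$$

evaluated at the previous lower bound for the new lower bound and at the previous upper bound for the new upper bound; the upper branch in the code additionally adds a $0.1\,\eta$ slack term.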
In [220]:
ts_idx = 5
nlags = 30
plot_extr_eigenvalues_bounds(
    ts_idx, nlags, "Sun", sun_bounds_of_autocorr_min_eigvals, plot_autocorr=True
)

Statistics of Autocorrelation Matrices' Determinants¶

In [236]:
# plot_mean_det
def plot_mean_det(begins, ends, title=None, subtitle=None, p=1, nlags=20):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    if title is None:
        title = f"Mean Determinant"
    kwargs = dict(
        title=title,
        subtitle=subtitle,
        calc_xticks=True,
        yscale="log",
        xtitle="lag (matrix of size: lag x lag)",
    )
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standard", "tab:blue"),
        (ihs_trans_ts, "IHS", "tab:red"),
    ]:
        mean_dets = np.zeros(nlags)
        std_dets = np.zeros(nlags)
        for lags in np.arange(1, nlags + 1):
            dets = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[ts_idx][begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                dets.append(np.linalg.det(mcov))
            mean_dets[lags - 1] = np.mean(dets)
            std_dets[lags - 1] = np.std(dets)
        fig = plot_stats(
            mean_dets,
            xs=np.arange(1, nlags + 1),
            std=std_dets[0:],
            std_xs=np.arange(1, nlags + 1),
            fill_along_axis=False,
            fill_only_positive=False,
            label=ts_name,
            color=color,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    ax_settings(fig=fig, showgrid=True)
    display(fig)
In [235]:
# Mean Determinant
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2

plot_mean_det(
    begins,
    ends,
    subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    p=p,
    nlags=nlags,
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [246]:
# plot_mean_det_ratios
def plot_mean_det_ratios(begins, ends, title=None, subtitle=None, p=1, nlags=20):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    if title is None:
        title = "Mean Ration det(R_p) / det(R_{p-1})"
    kwargs = dict(
        title=title,
        subtitle=subtitle,
        calc_xticks=True,
        yscale="log",
        xtitle="lag (matrix of size: lag x lag)",
    )
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standard", "tab:blue"),
        (ihs_trans_ts, "IHS", "tab:red"),
    ]:
        mean_det_rs = np.zeros(nlags - 1)
        std_dets_rs = np.zeros(nlags - 1)
        for lags in np.arange(2, nlags + 1):
            dets = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[ts_idx][begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                mcov_pm1 = cov_matrix(acov[:-1])
                dets.append(np.linalg.det(mcov) / np.linalg.det(mcov_pm1))
            mean_det_rs[lags - 2] = np.mean(dets)
            std_dets_rs[lags - 2] = np.std(dets)
        fig = plot_stats(
            mean_det_rs,
            xs=np.arange(2, nlags + 1),
            std=std_dets_rs[0:],
            std_xs=np.arange(2, nlags + 1),
            fill_along_axis=False,
            fill_only_positive=False,
            label=ts_name,
            color=color,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    ax_settings(fig=fig, showgrid=True)
    display(fig)
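The plotted ratio has a direct interpretation: by the Schur-complement determinant identity, for the autocorrelation matrix with $b$ collecting $r_1, \dots, r_{p-1}$,

$$\frac{\det R_p}{\det R_{p-1}} = r_0 - b^\top R_{p-1}^{-1}\, b,$$

which (with $r_0 = 1$ after the normalization above) is the one-step prediction error variance of the best linear predictor of order $p-1$; it is also the quantity $d_1$ used in Ma and Zarowski's bound (see the commented-out alternative in that cell).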
In [247]:
# Mean Determinant Ratios
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2

plot_mean_det_ratios(
    begins,
    ends,
    subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
    p=p,
    nlags=nlags,
)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]

AR Coefficients Plotting¶

In [42]:
def ar_coeffs(x, p, method):
    if method == ols:
        xlags, x0 = lagmat(x, p, original="sep", trim="both")
        xlags = add_constant(xlags)
        params = lstsq(xlags[p:, : p + 1], x0[p:], rcond=None)[0]
        ar_coeffs = params[1:]
    else:
        pacf_values = pacf(x, method=method, nlags=p)
        ar_coeffs, _ = ar_from_pacf(pacf_values, nlags=p)
    return ar_coeffs
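As a minimal, self-contained check of the OLS branch above (synthetic data; the coefficient values are arbitrary), one can simulate an AR(2) process and recover its coefficients by least squares on lagged values:

rng = np.random.default_rng(0)
phi = np.array([0.6, -0.3])  # true AR(2) coefficients of the simulated process
n_obs = 5000
x = np.zeros(n_obs)
for t in range(2, n_obs):
    x[t] = phi[0] * x[t - 1] + phi[1] * x[t - 2] + rng.standard_normal()
# regress x_t on a constant and the lags x_{t-1}, x_{t-2}
X = np.column_stack([np.ones(n_obs - 2), x[1:-1], x[:-2]])
params, *_ = np.linalg.lstsq(X, x[2:], rcond=None)
print(params[1:])  # estimated coefficients, close to [0.6, -0.3]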
In [37]:
# def plot_ar_diverse_intervals_experiments
def plot_ar_for_intervals(ts, ts_type, begins, ends, ps, method, p, **kwargs):
    fig = None
    for begin, end, s in zip(begins, ends, ps):
        color = (0, 1 - s * 0.5, 0)
        values = ar_coeffs(ts[begin:end], p, method)
        fig = plot_stats(
            values,
            label=f"on [{begin},{end-1}]",
            color=color,
            **kwargs,
        )
    if fig is not None:
        return fig


def plot_ar_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    intervals_type,
    methods,
    p,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    const_title = title is not None
    const_subtitle = subtitle is not None
    for method in methods:
        method_descr = methods_descrs[method]
        if not const_title:
            title = f"{method.upper()} AR({p}) Coeffs Computed on {intervals_type}"
        fig, axs = fig_with_vertical_subplots(
            len(ts_list),
            title=title,
            subplots_titles=True,
            ax_height=450,
            fontsize=15,
        )
        for (ts, ts_type), ax in zip(ts_list, axs):
            if not const_subtitle:
                subtitle = f"{method_descr} for {ts_type}"
            plot_ar_for_intervals(
                ts,
                ts_type,
                begins,
                ends,
                ps,
                method,
                p,
                ax=ax,
                subtitle=subtitle,
                **kwargs,
            )
        display(fig)
In [394]:
# ld-biased ar experiments
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_ar_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ld_biased],
    p=20,
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    showgrid=False,
)
In [40]:
# ols ar experiments
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_ar_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ols],
    p=20,
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
)

AR Coefficients Statistics¶

In [44]:
# def ar_means_and_stds
def ar_means_and_stds(
    ts,
    begins,
    ends,
    method,
    p=20,
    relative=False,
    relative_to_abs_sum=False,
    margin=None,
):
    n = len(begins)
    assert len(ends) == n
    ar_coeffs_values = np.zeros((n, p), dtype=float)
    for i, (begin, end) in enumerate(zip(begins, ends)):
        res = ar_coeffs(ts[begin:end], method=method, p=p)
        ar_coeffs_values[i, :] = res
    mu = np.mean(ar_coeffs_values, 0)
    std = np.std(ar_coeffs_values, 0)
    if margin is not None:
        if margin == "std":
            margin = std
        mu[np.abs(mu) < margin] = np.nan
    if relative:
        std *= 100 / mu
    if relative_to_abs_sum:
        std /= np.sum(np.abs(ar_coeffs_values), 0)
    return mu, std
In [328]:
# Mean AR(p) Coefficients With Standard Deviations at Lags
for method in [ld_biased, ols]:
    ts_idx = 5
    p = 20
    ts_len = 2000
    step = 750
    begin = 0
    begins = np.arange(begin, 8001 - ts_len, step)
    ends = np.arange(begin + ts_len, 8001, step)
    mu_standard, std_standard = ar_means_and_stds(
        standard_trans_ts[ts_idx], begins, ends, method=method, p=p
    )
    std_bar_standard = np.transpose(
        np.array([mu_standard - std_standard, mu_standard + std_standard])
    )
    mu_ihs, std_ihs = ar_means_and_stds(
        ihs_trans_ts[ts_idx], begins, ends, method=method, p=p
    )
    std_bar_ihs = np.transpose(np.array([mu_ihs - std_ihs, mu_ihs + std_ihs]))
    index_values = np.arange(1, p + 1)
    fig = plot_stats(
        mu_standard,
        xs=index_values,
        conf_intvs=std_bar_standard,
        label="standard",
        title=f"Mean AR({p}) Coefficients With Standard Deviations",
        subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        showgrid=True,
        ax_height=300,
        calc_xticks=True,
        #         major_xticks_loc=index_values,
    )
    display(fig)
    fig = plot_stats(
        mu_ihs,
        xs=index_values,
        conf_intvs=std_bar_ihs,
        color="tab:red",
        label="IHS",
        title=f"Mean AR({p}) Coefficients With Standard Deviations",
        subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        showgrid=True,
        ax_height=300,
        calc_xticks=True,
        #         major_xticks_loc=index_values,
    )
    #     fig.get_axes()
    display(fig)
In [330]:
# Mean AR(p) Coefficients With Standard Deviations at Lags
for method in [ld_biased, ols]:
    ts_idx = 5
    p = 50
    ts_len = 2000
    step = 750
    begin = 0
    begins = np.arange(begin, 8001 - ts_len, step)
    ends = np.arange(begin + ts_len, 8001, step)
    mu_standard, std_standard = ar_means_and_stds(
        standard_trans_ts[ts_idx], begins, ends, method=method, p=p
    )
    std_bar_standard = np.transpose(
        np.array([mu_standard - std_standard, mu_standard + std_standard])
    )
    mu_ihs, std_ihs = ar_means_and_stds(
        ihs_trans_ts[ts_idx], begins, ends, method=method, p=p
    )
    std_bar_ihs = np.transpose(np.array([mu_ihs - std_ihs, mu_ihs + std_ihs]))
    index_values = np.arange(1, p + 1)
    fig = plot_stats(
        mu_standard,
        xs=index_values,
        conf_intvs=std_bar_standard,
        label="standardized",
        title=f"Mean AR({p}) Coefficients With Standard Deviations",
        subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        showgrid=True,
        ax_height=300,
        calc_xticks=True,
    )
    display(fig)
    fig = plot_stats(
        mu_ihs,
        xs=index_values,
        conf_intvs=std_bar_ihs,
        color="tab:orange",
        label="IHS-normalized",
        title=f"Mean AR({p}) Coefficients With Standard Deviations",
        subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        showgrid=True,
        ax_height=300,
        calc_xticks=True,
    )
    display(fig)
In [88]:
# mean standard deviation of nth coefficients AR(p)
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_std(tss, method):
    mean_std_arr = np.zeros(p)
    for ts in tss:
        _, std = ar_means_and_stds(
            ts, begins, ends, method=method, p=p, relative_to_abs_sum=True
        )
        mean_std_arr += std
    return mean_std_arr / len(tss)


kwargs = {}
for method, color in zip([ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]):
    mean_std_standard = mean_std(standard_trans_ts, method=method)
    mean_std_ihs = mean_std(ihs_trans_ts, method=method)

    fig = plot_ts(
        mean_std_standard,
        color=color,
        linestyle="-",
        marker="o",
        name=f"{method} method on standard",
        title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
        subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
        xtitle="lag",
        height=800,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        mean_std_ihs,
        color=color,
        linestyle="--",
        marker="o",
        name=f"{method} method on IHS",
        **kwargs,
    )
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [34]:
# mean standard deviation of nth coefficients AR(p)
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_std(tss, method):
    mean_std_arr = np.zeros(p)
    for ts in tss:
        _, std = ar_means_and_stds(
            ts, begins, ends, method=method, p=p, relative_to_abs_sum=True
        )
        mean_std_arr += std
    return mean_std_arr / len(tss)


kwargs = dict(
    title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
    subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="lag",
    ax_height=300,
    calc_xticks=True,
)

pacf_method = ld_biased
mean_std_standard = mean_std(standard_trans_ts, method=pacf_method)
mean_std_ihs = mean_std(ihs_trans_ts, method=pacf_method)

fig = plot_ts(
    mean_std_standard,
    color="darkblue",
    linestyle="-",
    marker="o",
    name=f"standardized",
    **kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
    mean_std_ihs,
    color="tab:orange",
    linestyle="-",
    marker="o",
    name=f"IHS-normalized",
    **kwargs,
)
display(fig)
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]
In [35]:
# mean standard deviation of nth coefficients AR(p)
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_std(tss, method):
    mean_std_arr = np.zeros(p)
    for ts in tss:
        _, std = ar_means_and_stds(
            ts, begins, ends, method=method, p=p, relative=True, margin="std"
        )
        mean_std_arr += np.abs(std)
    return mean_std_arr / len(tss)


kwargs = dict(
    title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
    subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="lag",
    ax_height=300,
    calc_xticks=True,
    #     yscale="log",
    xmargin=0.02,
)

pacf_method = ld_biased
mean_std_standard = mean_std(standard_trans_ts, method=pacf_method)
mean_std_ihs = mean_std(ihs_trans_ts, method=pacf_method)
index_values = np.arange(1, p + 1)

fig = plot_ts(
    mean_std_standard,
    index_values=index_values,
    color="darkblue",
    linestyle="",
    marker="o",
    name=f"{pacf_methods_names[pacf_method]} method for standardized",
    **kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
    mean_std_ihs,
    index_values=index_values,
    color="tab:orange",
    linestyle="",
    marker="o",
    name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
    **kwargs,
)
# ax = fig.get_axes()
# ax.set_y
display(fig)
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]

Fitted AR Prediction Statistics¶

In [45]:
# ar_one_step_pred
def ar_one_step_pred(x, y, ar_coeffs, rand_std=None):
    """
    If rand_std is not None, then a random error of N(0,rand_std^2) is added to each predicted value.
    """
    n = len(x)
    ar_flipped = np.flipud(ar_coeffs)
    p = len(ar_flipped)
    y[:p] = x[:p]
    for i in range(p, n):
        d = np.dot(x[i - p : i], ar_flipped)
        if rand_std is not None:
            d += float(np.random.normal(0, rand_std, 1))
        y[i] = d
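A quick sanity check of the predictor above (toy numbers): with a single AR coefficient of 0.5, every prediction after the first value is half of the preceding observation.

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.zeros_like(x)
ar_one_step_pred(x, y, ar_coeffs=[0.5])
print(y)  # -> 1.0, 0.5, 1.0, 1.5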
In [46]:
# ar_fit_stats
def ar_fit_stats(add_errors=False):
    #     scorings_names = ["mae", "mape", "mase"]
    scorings_names = ["mae"]
    #     pacf_methods = [ols, burg, ld_biased]
    pacf_methods = [ols, ld_biased]

    pred_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    ar_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    pacf_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}

    for ts, ts_type in zip(
        [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
        ["standard", "IHS"],
    ):
        for pacf_method in pacf_methods:
            pacf_values = pacf(ts[:n], method=pacf_method, nlags=p)
            ar_coeffs, _ = ar_from_pacf(pacf_values, nlags=p)

            x = ts.values
            y = np.zeros_like(x)
            ar_one_step_pred(x, y, ar_coeffs)
            y = (y - np.mean(y)) / np.std(y)
            pacf_values_y = pacf(y[-n:], method=pacf_method, nlags=p)
            ar_coeffs_y, _ = ar_from_pacf(pacf_values_y, nlags=p)

            for scoring_name in scorings_names:
                scoring = get_scoring(scoring_name)
                model_name = f"{pacf_method} AR({p}) of {ts_type}"
                pred_diffs_errors[scoring_name].append(
                    (np.round(scoring(x, y), 3), model_name)
                )
                ar_diffs_errors[scoring_name].append(
                    (np.round(scoring(ar_coeffs, ar_coeffs_y), 3), model_name)
                )
                pacf_diffs_errors[scoring_name].append(
                    (np.round(scoring(pacf_values, pacf_values_y), 3), model_name)
                )

    for stats, stats_name in zip(
        [pred_diffs_errors, pacf_diffs_errors, ar_diffs_errors],
        [
            "Differences between x and predicted y",
            "Differences between pacf_x and pacf_y",
            "Differences between AR coeffs of x and y",
        ],
    ):
        print(f"{stats_name}:")
        for scoring_name in scorings_names:
            stats[scoring_name] = sorted(stats[scoring_name])
            print(f"  {scoring_name}:")
            for res, name in stats[scoring_name]:
                print(f"    {res} – {name}")


n = 1000
ts_idx = 5
p = 16
ar_fit_stats()
Differences between x and predicted y:
  mae:
    0.211 – ols AR(16) of standard
    0.252 – ld_biased AR(16) of standard
    0.322 – ols AR(16) of IHS
    0.324 – ld_biased AR(16) of IHS
Differences between pacf_x and pacf_y:
  mae:
    0.063 – ols AR(16) of standard
    0.082 – ld_biased AR(16) of standard
    0.142 – ld_biased AR(16) of IHS
    0.159 – ols AR(16) of IHS
Differences between AR coeffs of x and y:
  mae:
    0.441 – ld_biased AR(16) of IHS
    0.485 – ld_biased AR(16) of standard
    0.515 – ols AR(16) of IHS
    0.969 – ols AR(16) of standard
In [70]:
# ar_fit_stats
def ar_fit_stats(add_errors=False):
    scorings_names = ["mae", "mse", "mape", "mase"]
    #     scorings_names = ["mae"]
    pacf_methods = [ols, burg, ld_biased]
    #     pacf_methods = [ols, ld_biased]

    pred_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    ar_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    pacf_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}

    for ts, ts_type in zip(
        [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
        ["standard", "IHS"],
    ):
        for pacf_method in pacf_methods:
            pacf_values = pacf(ts[:n], method=pacf_method, nlags=p)
            ar_coeffs, _ = ar_from_pacf(pacf_values, nlags=p)

            x = ts.values
            y = np.zeros_like(x)
            ar_one_step_pred(x, y, ar_coeffs)
            y = (y - np.mean(y)) / np.std(y)
            pacf_values_y = pacf(y[-n:], method=pacf_method, nlags=p)
            ar_coeffs_y, _ = ar_from_pacf(pacf_values_y, nlags=p)

            for scoring_name in scorings_names:
                scoring = get_scoring(scoring_name)
                model_name = f"{pacf_method} AR({p}) of {ts_type}"
                pred_diffs_errors[scoring_name].append(
                    (np.round(scoring(x, y), 3), model_name)
                )
                ar_diffs_errors[scoring_name].append(
                    (np.round(scoring(ar_coeffs, ar_coeffs_y), 3), model_name)
                )
                pacf_diffs_errors[scoring_name].append(
                    (np.round(scoring(pacf_values, pacf_values_y), 3), model_name)
                )

    for stats, stats_name in zip(
        [pred_diffs_errors, pacf_diffs_errors, ar_diffs_errors],
        [
            "Differences between x and predicted y",
            "Differences between pacf_x and pacf_y",
            "Differences between AR coeffs of x and y",
        ],
    ):
        print(f"{stats_name}:")
        for scoring_name in scorings_names:
            stats[scoring_name] = sorted(stats[scoring_name])
            print(f"  {scoring_name}:")
            for res, name in stats[scoring_name]:
                print(f"    {res} – {name}")


n = 1000
ts_idx = 5
p = 25
ar_fit_stats()
Differences between x and predicted y:
  mae:
    0.195 – burg AR(25) of ordinary
    0.195 – ols AR(25) of ordinary
    0.247 – ld_biased AR(25) of ordinary
    0.312 – burg AR(25) of IHS
    0.312 – ols AR(25) of IHS
    0.314 – ld_biased AR(25) of IHS
  mse:
    0.079 – burg AR(25) of ordinary
    0.079 – ols AR(25) of ordinary
    0.109 – ld_biased AR(25) of ordinary
    0.153 – burg AR(25) of IHS
    0.153 – ols AR(25) of IHS
    0.154 – ld_biased AR(25) of IHS
  mape:
    0.621 – ols AR(25) of ordinary
    0.626 – burg AR(25) of ordinary
    2.141 – ld_biased AR(25) of ordinary
    6.908 – ols AR(25) of IHS
    6.909 – burg AR(25) of IHS
    6.913 – ld_biased AR(25) of IHS
  mase:
    0.627 – burg AR(25) of ordinary
    0.627 – ols AR(25) of ordinary
    0.765 – burg AR(25) of IHS
    0.765 – ols AR(25) of IHS
    0.769 – ld_biased AR(25) of IHS
    0.797 – ld_biased AR(25) of ordinary
Differences between pacf_x and pacf_y:
  mae:
    0.061 – ld_biased AR(25) of ordinary
    0.108 – ld_biased AR(25) of IHS
    0.119 – burg AR(25) of IHS
    0.12 – ols AR(25) of IHS
    0.202 – burg AR(25) of ordinary
    0.203 – ols AR(25) of ordinary
  mse:
    0.008 – ld_biased AR(25) of ordinary
    0.022 – ld_biased AR(25) of IHS
    0.027 – burg AR(25) of IHS
    0.027 – ols AR(25) of IHS
    0.101 – burg AR(25) of ordinary
    0.102 – ols AR(25) of ordinary
  mape:
    0.469 – burg AR(25) of ordinary
    0.469 – ols AR(25) of ordinary
    0.548 – ld_biased AR(25) of ordinary
    1.845 – burg AR(25) of IHS
    1.878 – ols AR(25) of IHS
    4.142 – ld_biased AR(25) of IHS
  mase:
    0.168 – ld_biased AR(25) of ordinary
    0.232 – burg AR(25) of ordinary
    0.232 – ols AR(25) of ordinary
    0.442 – ld_biased AR(25) of IHS
    0.475 – burg AR(25) of IHS
    0.476 – ols AR(25) of IHS
Differences between AR coeffs of x and y:
  mae:
    0.267 – ld_biased AR(25) of ordinary
    0.328 – ld_biased AR(25) of IHS
    0.364 – burg AR(25) of IHS
    0.365 – ols AR(25) of IHS
    457.438 – burg AR(25) of ordinary
    465.964 – ols AR(25) of ordinary
  mse:
    0.137 – ld_biased AR(25) of ordinary
    0.221 – ld_biased AR(25) of IHS
    0.275 – burg AR(25) of IHS
    0.276 – ols AR(25) of IHS
    409826.958 – burg AR(25) of ordinary
    425737.152 – ols AR(25) of ordinary
  mape:
    0.399 – ld_biased AR(25) of ordinary
    0.923 – ols AR(25) of ordinary
    0.924 – burg AR(25) of ordinary
    1.737 – ld_biased AR(25) of IHS
    2.102 – burg AR(25) of IHS
    2.108 – ols AR(25) of IHS
  mase:
    0.148 – ld_biased AR(25) of ordinary
    0.471 – burg AR(25) of ordinary
    0.471 – ols AR(25) of ordinary
    0.508 – ld_biased AR(25) of IHS
    0.537 – burg AR(25) of IHS
    0.538 – ols AR(25) of IHS
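In both runs ar_fit_stats follows the same recipe: estimate the PACF of the first n observations, convert it to AR(p) coefficients with ar_from_pacf (the Levinson-Durbin recursion), form the one-step-ahead predictions, presumably $\hat{x}_t = \sum_{k=1}^{p} \varphi_k x_{t-k}$ inside ar_one_step_pred, standardize them, refit an AR(p) on the last n predicted values, and score the differences between the original and refitted PACF/AR coefficients as well as between $x$ and the predictions.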
In [399]:
%%html
<!--hide_me_please-->
<p class="showcode" id="1234rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#1234rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="1234rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#1234rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>

PSD Plotting¶

In [35]:
def dft(x):
    """Compute the discrete Fourier transform of a 1-D real-valued signal x.

    Naive O(N^2) implementation via the full N x N complex-exponential matrix.
    """
    N = len(x)
    n = np.arange(N)
    k = n.reshape((N, 1))
    # e[k, n] = exp(-2j*pi*k*n/N); X[k] = sum_n x[n] * e[k, n]
    e = np.exp(-2j * np.pi * k * n / N)
    X = np.dot(e, x)
    return X
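The function above implements the textbook definition $X_k = \sum_{n=0}^{N-1} x_n\, e^{-2\pi i k n / N}$ as an explicit $N \times N$ matrix-vector product, so it costs $O(N^2)$. A quick sanity-check sketch against NumPy's FFT (the random signal below is illustrative only):

rng = np.random.default_rng(0)
x = rng.normal(size=256)
assert np.allclose(dft(x), np.fft.fft(x))  # same sign/normalization convention as np.fft.fft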
In [36]:
# psd_dft_plots
kwargs1 = dict(
    xtitle="Freq (Hz)",
    ytitle="Amplitude",
    title="Power Spectral Density by DFT",
)

kwargs2 = dict(
    xtitle="Freq (Hz)",
    ytitle="Amplitude",
    title="Smoothed Power Spectral Density by DFT",
)

kwargs3 = dict(
    xtitle="Freq (Hz)",
    ytitle="Amplitude",
    title="Smoothed Power Spectral Density by DFT",
    yscale="log"
)

ts_idx = 5
for ts, ts_type, color in zip(
    [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
    ["standard", "IHS"],
    ["tab:blue", "tab:red"],
):
    dft_values = dft(ts)

    # sampling rate
    sr = 128
    # sampling interval
    sintv = 1.0 / sr

    # calculate the frequency
    N = len(dft_values)
    T = N / sr
    freq = np.arange(N) / T
    N //= 2
    dft_values = dft_values[:N] / N
    freq = freq[:N]

    psd = pd.Series(np.abs(dft_values))

    fig1 = plot_stats(
        psd, xs=freq, name=ts_type, marker="", color=color, alpha=0.5, **kwargs1
    )
    kwargs1 = dict(fig=fig1)

    smoothed_psd = get_smoothed(get_smoothed(psd, weights=np.arange(4 * T + 1)), std=T)
    fig2 = plot_ts(
        smoothed_psd, index_values=freq, name=ts_type, color=color, **kwargs2
    )
    kwargs2 = dict(fig=fig2)
    
    fig3 = plot_ts(
        smoothed_psd, index_values=freq, name=ts_type, color=color, **kwargs3
    )
    kwargs3 = dict(fig=fig3)

display(fig1)
display(fig2)
display(fig3)
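Only the first $N/2$ DFT bins are kept, i.e. the one-sided spectrum: the plotted frequencies are $f_k = k \cdot \mathrm{sr} / N$ for $k = 0, \dots, N/2 - 1$, so with $\mathrm{sr} = 128$ Hz they run from 0 Hz up to just below the Nyquist frequency $\mathrm{sr}/2 = 64$ Hz in steps of $1/T = \mathrm{sr}/N$.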
In [400]:
%%html
<!--hide_me_please-->
<p class="showcode" id="12345rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#12345rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="12345rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#12345rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
In [37]:
# psd_fun_from_ar
def psd_fun_from_ar(ar_coeffs, ts, sampT):
    """Return a vectorized function f -> AR(p) spectral density estimate at frequency f (Hz)."""
    p = len(ar_coeffs)
    x = ts.values

    # one-step AR predictions; the residual variance estimates the innovation variance
    y = np.zeros_like(x)
    ar_one_step_pred(x, y, ar_coeffs)
    s = np.std(x - y) ** 2

    # S(f) = s * sampT / |1 - sum_k phi_k * exp(-2*pi*i*f*k*sampT)|^2
    return np.vectorize(
        lambda f: s
        * sampT
        / (
            np.abs(
                1
                - np.dot(
                    np.exp(-2j * np.pi * f * sampT * np.arange(1, p + 1)),
                    ar_coeffs,
                )
            )
            ** 2
        )
    )
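The returned function evaluates the standard AR(p) spectral density estimate $\hat{S}(f) = \hat{\sigma}^2\, \Delta t \,/\, \bigl|1 - \sum_{k=1}^{p} \varphi_k\, e^{-2\pi i f k \Delta t}\bigr|^2$, where $\Delta t$ = sampT is the sampling interval, $\varphi_1, \dots, \varphi_p$ are the AR coefficients, and $\hat{\sigma}^2$ is taken here as the variance of the one-step prediction residuals $x - y$.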
In [411]:
# psd_from_ar_plots
p = 50
for pacf_method in [ols, ld_biased, burg]:
    kwargs = dict(
        xtitle="Freq (Hz)",
        ytitle="Amplitude",
        title=f"Power Spectral Density by {pacf_method.upper()} AR({p})",
    )
    for ts, ts_type, color in zip(
        [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
        ["standard", "IHS"],
        ["tab:blue", "tab:red"],
    ):
        pacf_values = pacf(ts, method=pacf_method, nlags=p)
        ar_cs, _ = ar_from_pacf(pacf_values, nlags=p)
        sr = 128
        psd = psd_fun_from_ar(ar_cs, ts, 1.0 / sr)
        N = 1000
        freq = np.arange(N) / (2 * N) * sr
        fig = plot_ts(
            psd(freq),
            #             yscale="log",
            index_values=freq,
            name=ts_type,
            color=color,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    display(fig)
In [38]:
# psd_from_ar_plots
p = 30
for pacf_method in [ols, ld_biased, burg]:
    kwargs = dict(
        xtitle="Freq (Hz)",
        ytitle="Amplitude",
        title=f"Power Spectral Density by {pacf_method.upper()} AR({p})",
    )
    for ts, ts_type, color in zip(
        [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
        ["standard", "IHS"],
        ["tab:blue", "tab:red"],
    ):
        pacf_values = pacf(ts, method=pacf_method, nlags=p)
        ar_cs, _ = ar_from_pacf(pacf_values, nlags=p)
        sr = 128
        psd = psd_fun_from_ar(ar_cs, ts, 1.0 / sr)
        N = 1000
        freq = np.arange(N) / (2 * N) * sr
        fig = plot_ts(
            psd(freq),
            #             yscale="log",
            index_values=freq,
            name=ts_type,
            color=color,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    display(fig)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
/tmp/ipykernel_5952/2637773477.py in <module>
     15         ar_cs, _ = ar_from_pacf(pacf_values, nlags=p)
     16         sr = 128
---> 17         psd = psd_fun_from_ar(ar_cs, ts, 1.0 / sr)
     18         N = 1000
     19         freq = np.arange(N) / (2 * N) * sr

/tmp/ipykernel_5952/3540895875.py in psd_fun_from_ar(ar_coeffs, ts, sampT)
      5 
      6     y = np.zeros_like(x)
----> 7     ar_one_step_pred(x, y, ar_coeffs)
      8     s = np.std(x - y) ** 2
      9 

NameError: name 'ar_one_step_pred' is not defined
In [409]:
# psd_from_ar_plots
p = 30
for pacf_method in [ld_biased]:
    kwargs = dict(
        xtitle="Freq (Hz)",
        ytitle="Amplitude",
        title=f"Power Spectral Density by {pacf_method.upper()} AR({p})",
    )
    for ts, ts_type, color in zip(
        [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
        ["standard", "IHS"],
        ["tab:blue", "tab:red"],
    ):
        pacf_values = pacf(ts, method=pacf_method, nlags=p)
        ar_cs, _ = ar_from_pacf(pacf_values, nlags=p)
        sr = 128
        psd = psd_fun_from_ar(ar_cs, ts, 1.0 / sr)
        N = 1000
        freq = np.arange(N) / (2 * N) * sr
        fig = plot_ts(
            psd(freq),
            #             yscale="log",
            index_values=freq,
            name=ts_type,
            color=color,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    display(fig)
In [401]:
%%html
<!--hide_me_please-->
<p class="showcode" id="123456rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#123456rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="123456rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#123456rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>

PSD Statistics¶

In [40]:
# def ar_psd_means_and_stds
def ar_psd_means_and_stds(
    ts,
    begins,
    ends,
    sr,
    freqs,
    method,
    p=20,
    relative=False,
    relative_to_abs_sum=False,
    relative_to_mean_area=False,
    unit_area=True,
):
    n = len(begins)
    assert len(ends) == n
    psd_values = np.zeros((n, len(freqs)), dtype=float)  # one estimated PSD per interval
    for i, (begin, end) in enumerate(zip(begins, ends)):
        ar_coeffs_values = ar_coeffs(ts[begin:end], method=method, p=p)
        psd_values[i, :] = psd_fun_from_ar(ar_coeffs_values, ts, 1.0 / sr)(freqs)
        if unit_area:
            psd_values[i, :] /= np.sum(psd_values[i, :])
    mu = np.mean(psd_values, 0)
    std = np.std(psd_values, 0)
    if relative:
        std /= mu
    if relative_to_abs_sum:
        std /= np.sum(np.abs(psd_values), 0)
    if relative_to_mean_area:
        std /= np.sum(mu)
    return mu, std
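The normalization switches matter for the comparisons that follow. A minimal sketch with made-up numbers (not EEG data) of what unit_area=True does: each estimated PSD row is rescaled to sum to 1, so the reported standard deviations compare spectral shapes rather than absolute power levels.

import numpy as np

psd_rows = np.array([[4.0, 2.0, 2.0], [2.0, 1.0, 1.0]])  # two toy "PSD estimates"
psd_rows /= psd_rows.sum(axis=1, keepdims=True)  # both rows become [0.5, 0.25, 0.25]
print(np.std(psd_rows, axis=0))  # -> [0. 0. 0.]: identical shapes, zero spread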
In [414]:
%%html
<!--hide_me_please-->
<p id="gfd1234765dfghnbrtyaa"></p>
<p class="hidecode" id="gfd1234765dfghnbrty"><a href="javascript:code_toggle('#gfd1234765dfghnbrtyaa')">hide codes in the notebook</a></p>
In [432]:
# Mean PSD by AR(p) With Standard Deviations
for method in [ld_biased, burg]:
    ts_idx = 5
    p = 50
    ts_len = 2000
    N = ts_len // 2
    sr = 128
    freqs = np.arange(N) / (2 * N) * sr
    step = 750
    begin = 0
    begins = np.arange(begin, 8001 - ts_len, step)
    ends = np.arange(begin + ts_len, 8001, step)
    mu_standard, std_standard = ar_psd_means_and_stds(
        standard_trans_ts[ts_idx],
        begins,
        ends,
        method=method,
        p=p,
        sr=sr,
        freqs=freqs,
        unit_area=True,
        relative_to_mean_area=True,
    )
    mu_ihs, std_ihs = ar_psd_means_and_stds(
        ihs_trans_ts[ts_idx],
        begins,
        ends,
        method=method,
        p=p,
        sr=sr,
        freqs=freqs,
        unit_area=True,
        relative_to_mean_area=True,
    )

    fig = plot_ts(
        mu_standard,
        index_values=freqs,
        name="standard",
        title=f"Mean PSD by AR({p}) With Standard Deviations",
        subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        xtitle="Freq (Hz)",
        ytitle="Amplitude",
        showgrid=True,
    )
    ax = fig.get_axes()[0]
    ax.fill_between(
        freqs,
        std_standard,
        np.zeros_like(std_standard),
        alpha=0.25,
        color="tab:blue",
    )
    fig = plot_ts(mu_ihs, index_values=freqs, color="tab:red", name="IHS", fig=fig)
    ax.fill_between(
        freqs,
        std_ihs,
        np.zeros_like(std_ihs),
        alpha=0.25,
        color="tab:red",
    )
    display(fig)

    fig = plot_ts(
        mu_standard,
        index_values=freqs,
        name="standard",
        title=f"Mean PSD by AR({p}) With Standard Deviations",
        subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        xtitle="Freq (Hz)",
        ytitle="Amplitude",
        showgrid=True,
        yscale="log",
    )
    ax = fig.get_axes()[0]
    ax.fill_between(
        freqs,
        mu_standard + std_standard,
        mu_standard - std_standard,
        alpha=0.25,
        color="tab:blue",
    )
    fig = plot_ts(mu_ihs, index_values=freqs, color="tab:red", name="IHS", fig=fig)
    ax.fill_between(
        freqs,
        mu_ihs + std_ihs,
        mu_ihs - std_ihs,
        alpha=0.25,
        color="tab:red",
    )
    display(fig)
In [403]:
%%html
<!--hide_me_please-->
<p class="showcode" id="12345678rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#12345678rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="12345678rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#12345678rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
In [425]:
# mean area-relative standard deviation of the AR(p)-based PSD as a function of the order p
ps = np.array([5, 10, 15, 20, 25, 30, 35, 40, 50, 60, 70, 80, 90, 100])
ts_len = 2000
N = ts_len // 2
sr = 128
freqs = np.arange(N) / (2 * N) * sr
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_stds(tss, freqs, method, ps):
    mean_std_psd = np.zeros(len(ps))
    for i, p in enumerate(ps):
        for ts in tss:
            _, std = ar_psd_means_and_stds(
                ts, begins, ends, sr, freqs, method, p=p, unit_area=True
            )
            mean_std_psd[i] += np.sum(std)
    return mean_std_psd / len(tss)


mean_std_psd_standard_lst = [mean_stds(standard_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased, burg, ols]]
mean_std_psd_ihs_lst = [mean_stds(ihs_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased, burg, ols]]

# mean_std_psd_standard_lst = [
#     mean_stds(standard_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased]
# ]
# mean_std_psd_ihs_lst = [
#     mean_stds(ihs_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased]
# ]
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
In [427]:
kwargs = dict(
    title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
    subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="p (order of AR)",
    xmargin=0.02,
    calc_xticks=True,
    #     yscale="log",
)
for method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
    [ld_biased, burg, ols],
    [("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
    zip(mean_std_psd_standard_lst, mean_std_psd_ihs_lst),
):

    fig = plot_ts(
        mean_std_standard,
        index_values=ps,
        name=f"{pacf_methods_names[method]} method on the standardized",
        color="darkblue",
        linestyle=linestyle,
        linewidth=linewidth,
        marker="o",
        height=800,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        mean_std_ihs,
        index_values=ps,
        name=f"{pacf_methods_names[method]} method on the IHS-normalized",
        color="tab:orange",
        linestyle=linestyle,
        linewidth=linewidth,
        marker="o",
        **kwargs,
    )
display(fig)
In [423]:
kwargs = dict(
    title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
    subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="p (order of AR)",
    xmargin=0.02,
    calc_xticks=True,
    #     yscale="log",
)
for method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
    [ld_biased], [("-", 1.5)], zip(mean_std_psd_standard_lst, mean_std_psd_ihs_lst)
):
    #     [ld_biased, burg, ols],
    #     [("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
    #     zip(mean_std_standard_lst, mean_std_ihs_lst)):

    fig = plot_ts(
        mean_std_standard,
        index_values=ps,
        name=f"{pacf_methods_names[method]} method on the standardized",
        color="darkblue",
        linestyle=linestyle,
        linewidth=linewidth,
        marker="o",
        height=800,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        mean_std_ihs,
        index_values=ps,
        name=f"{pacf_methods_names[method]} method on the IHS-normalized",
        color="tab:orange",
        linestyle=linestyle,
        linewidth=linewidth,
        marker="o",
        **kwargs,
    )
display(fig)
In [30]:
%%html
<!--hide_me_please-->
<p class="showcode" id="zxc12345678rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#zxc12345678rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="zxc12345678rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#zxc12345678rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
In [426]:
# mean area-relative standard deviation of the AR(p)-based PSD vs. the order p (N = 256 frequency bins)
ps = np.array(
    [
        5,
        10,
        15,
        20,
        25,
        30,
        35,
        40,
        50,
        60,
        80,
        100,
        150,
        200,
    ]
)
ts_len = 2000
N = 256
sr = 128
freqs = np.arange(N) / (2 * N) * sr
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_stds(tss, freqs, method, ps):
    mean_std_psd = np.zeros(len(ps))
    for i, p in enumerate(ps):
        for ts in tss:
            _, std = ar_psd_means_and_stds(
                ts, begins, ends, sr, freqs, method, p=p, relative_to_mean_area=True
            )
            mean_std_psd[i] += np.sum(std)
    return mean_std_psd / len(tss)


kwargs = dict(
    title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
    subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="p (order of AR)",
    #     yscale="log",
)
for method, color in zip([ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]):
    mean_std_standard = mean_stds(standard_trans_ts, freqs, method=method, ps=ps)
    mean_std_ihs = mean_stds(ihs_trans_ts, freqs, method=method, ps=ps)

    fig = plot_ts(
        mean_std_standard,
        index_values=ps,
        name=f"{method} method on standard",
        color=color,
        linestyle="-",
        marker="o",
        height=800,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        mean_std_ihs,
        index_values=ps,
        name=f"{method} method on IHS",
        color=color,
        linestyle="--",
        marker="o",
        **kwargs,
    )
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]

Plots for the thesis¶

PACFs¶

In [89]:
# def plot_diverse_intervals_experiments
def plot_pacf_for_intervals(
    ts, ts_type, begins, ends, ps, pacf_method, alpha=0.05, nlags=None, **kwargs
):
    # ps holds one fraction in [0, 1] per interval, used only to shade the line color
    for begin, end, frac in zip(begins, ends, ps):
        color = (0, 1 - frac * 0.5, 0)
        plot_pacf(
            ts[begin:end],
            alpha=alpha,
            nlags=nlags,
            method=pacf_method,
            #             label=f"[{begin},{end-1}]",
            color=color,
            conf_alpha=0.02,
            **kwargs,
        )


def plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    intervals_type,
    pacf_methods,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    for pacf_method in pacf_methods:
        method_descr = methods_descrs[pacf_method]
        fig, axs = fig_with_vertical_subplots(
            len(ts_list),
            title=None,
            #             subplots_titles=True,
            ax_height=200,
            fontsize=15,
        )
        for (ts, ts_type), ax in zip(ts_list, axs):
            ax.set_ylim(bottom=-0.7, top=1.05)
            plot_pacf_for_intervals(
                ts,
                ts_type,
                begins,
                ends,
                ps,
                pacf_method,
                plot_params={},
                ax=ax,
                **kwargs,
            )
        display(fig)


# pacf experiments (ld_biased, ols, burg) on highly overlapping intervals of the standardized signal
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ld_biased, ols, burg],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [95]:
# ols pacf experiments on highly overlapping intervals of the IHS-normalized signal
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ols],
    ts_list=[
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [94]:
# pacf experiments (ld_biased, ols, burg) on intervals shifted by 750 samples, standardized signal
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ld_biased, ols, burg],
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [92]:
# ols pacf experiments on intervals shifted by 750 samples, IHS-normalized signal
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    f"[n,n+{ts_len})",
    [ols],
    ts_list=[
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    nlags=nlags,
)
In [298]:
# mean std of nth lag of pacf
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")


def mean_std(tss, pacf_method):
    mean_std_arr = np.zeros(nlags + 1)
    for ts in tss:
        _, std, _ = pacf_means_and_stds(
            ts, begins, ends, pacf_method=pacf_method, nlags=nlags
        )
        mean_std_arr += std
    return mean_std_arr / len(tss)


kwargs = dict(
    xtitle="lag",
    height=500,
)
for pacf_method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
    [ld_biased, burg, ols],
    [("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
    zip(mean_std_standard_lst, mean_std_ihs_lst),
):
    fig = plot_ts(
        get_smoothed(pd.Series(mean_std_standard), std=2, only_prevs=False),
        color="darkblue",
        linestyle=linestyle,
        linewidth=linewidth,
        #         alpha=0.5,
        #         marker="o",
        name=f"{pacf_methods_names[pacf_method]} method for standardized",
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        get_smoothed(pd.Series(mean_std_ihs), std=2, only_prevs=False),
        color="tab:orange",
        linestyle=linestyle,
        linewidth=linewidth,
        #         alpha=0.5,
        #         marker="o",
        name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
        **kwargs,
    )
display(fig)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]

AR parameters¶

In [41]:
# def plot_ar_diverse_intervals_experiments
def plot_ar_for_intervals(ts, ts_type, begins, ends, ps, method, p, **kwargs):
    fig = None
    kwargs["calc_xticks"] = True
    kwargs["major_xstep"] = 1
    for begin, end, s in zip(begins, ends, ps):
        color = (0, 1 - s * 0.5, 0)
        values = ar_coeffs(ts[begin:end], p, method)
        fig = plot_stats(
            values,
            xs=np.arange(1, p+1),
            #             label=f"on [{begin},{end-1}]",
            color=color,
            #             showgrid=True,
            fig=fig,
            **kwargs,
        )
        if "calc_xticks" in kwargs:
            kwargs.pop("calc_xticks")
    if fig is not None:
        return fig


def plot_ar_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    methods,
    p,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    const_title = title is not None
    const_subtitle = subtitle is not None
    for method in methods:
        method_descr = methods_descrs[method]
        for ts, ts_type in ts_list:
            fig = plot_ar_for_intervals(
                ts,
                ts_type,
                begins,
                ends,
                ps,
                method,
                p,
                **kwargs,
            )
            display(fig)


# AR(20) coefficient experiments (ld_biased, ols, burg) on intervals shifted by 750 samples
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_ar_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    methods=[ld_biased, ols, burg],
    p=20,
    ts_list=[
        (standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
        (ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
    ],
    title="",
    ax_height=200,
)
In [39]:
# mean standard deviation of nth coefficients AR(p)
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_std(tss, method):
    mean_std_arr = np.zeros(p)
    for ts in tss:
        _, std = ar_means_and_stds(
            ts, begins, ends, method=method, p=p, relative=True, margin="std"
        )
        mean_std_arr += np.abs(std)
    return mean_std_arr / len(tss)


kwargs = dict(
    #     title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
    #     subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="k",
    ax_height=300,
    calc_xticks=True,
    #     yscale="log",
    xmargin=0.02,
)

pacf_method = ld_biased
mean_std_standard = mean_std(standard_trans_ts, method=pacf_method)
mean_std_ihs = mean_std(ihs_trans_ts, method=pacf_method)
index_values = np.arange(1, p + 1)

fig = plot_ts(
    mean_std_standard,
    index_values=index_values,
    color="darkblue",
    linestyle="",
    marker="o",
    name=f"{pacf_methods_names[pacf_method]} method for standardized",
    **kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
    mean_std_ihs,
    index_values=index_values,
    color="tab:orange",
    linestyle="",
    marker="o",
    name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
    **kwargs,
)
# ax = fig.get_axes()
# ax.set_y
display(fig)
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]

PSD¶

In [72]:
# Mean PSD by AR(p) With Standard Deviations
for method in [ld_biased, ols, burg]:
    ts_idx = 5
    p = 50
    ts_len = 2000
    N = ts_len // 2
    sr = 128
    freqs = np.arange(N) / (2 * N) * sr
    step = 750
    begin = 0
    begins = np.arange(begin, 8001 - ts_len, step)
    ends = np.arange(begin + ts_len, 8001, step)
    mu_standard, std_standard = ar_psd_means_and_stds(
        standard_trans_ts[ts_idx],
        begins,
        ends,
        method=method,
        p=p,
        sr=sr,
        freqs=freqs,
        unit_area=False,
        relative_to_mean_area=False,
    )
    mu_ihs, std_ihs = ar_psd_means_and_stds(
        ihs_trans_ts[ts_idx],
        begins,
        ends,
        method=method,
        p=p,
        sr=sr,
        freqs=freqs,
        unit_area=False,
        relative_to_mean_area=False,
    )

    index = np.where((freqs >= 3) & (freqs <= 12))
    fig = plot_ts(
        mu_standard,
        index=index,
        index_values=freqs[index],
        name="standardized",
        #         title=f"Mean PSD by AR({p}) With Standard Deviations",
        #         subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
        xtitle="Freq (Hz)",
        ytitle="Amplitude",
        color="darkblue",
        #         calc_xticks=True,
        ax_height=300,
        showgrid=True,
    )
    ax = fig.get_axes()[0]
    ax.fill_between(
        freqs[index],
        std_standard[index],
        np.zeros_like(std_standard[index]),
        alpha=0.1,
        color="darkblue",
    )
    fig = plot_ts(
        mu_ihs[index],
        index_values=freqs[index],
        color="tab:orange",
        name="IHS-normalized",
        fig=fig,
    )
    ax.fill_between(
        freqs[index],
        std_ihs[index],
        np.zeros_like(std_ihs[index]),
        alpha=0.25,
        color="tab:orange",
    )
    ax.set_ylim(top=0.5)
    display(fig)
In [61]:
freqs[index]
Out[61]:
array([0.   , 0.064, 0.128, 0.192, 0.256, 0.32 , 0.384, 0.448, 0.512,
       0.576, 0.64 , 0.704, 0.768, 0.832, 0.896, 0.96 , 1.024, 1.088,
       1.152, 1.216, 1.28 , 1.344, 1.408, 1.472, 1.536, 1.6  , 1.664,
       1.728, 1.792, 1.856, 1.92 , 1.984, 2.048, 2.112, 2.176, 2.24 ,
       2.304, 2.368, 2.432, 2.496, 2.56 , 2.624, 2.688, 2.752, 2.816,
       2.88 , 2.944, 3.008, 3.072, 3.136, 3.2  , 3.264, 3.328, 3.392,
       3.456, 3.52 , 3.584, 3.648, 3.712, 3.776, 3.84 , 3.904, 3.968,
       4.032, 4.096, 4.16 , 4.224, 4.288, 4.352, 4.416, 4.48 , 4.544,
       4.608, 4.672, 4.736, 4.8  , 4.864, 4.928, 4.992, 5.056, 5.12 ,
       5.184, 5.248, 5.312, 5.376, 5.44 , 5.504, 5.568, 5.632, 5.696,
       5.76 , 5.824, 5.888, 5.952, 6.016, 6.08 , 6.144, 6.208, 6.272,
       6.336, 6.4  , 6.464, 6.528, 6.592, 6.656, 6.72 , 6.784, 6.848,
       6.912, 6.976, 7.04 , 7.104, 7.168, 7.232, 7.296, 7.36 , 7.424,
       7.488, 7.552, 7.616, 7.68 , 7.744, 7.808, 7.872, 7.936, 8.   ,
       8.064, 8.128, 8.192, 8.256, 8.32 , 8.384, 8.448, 8.512, 8.576,
       8.64 , 8.704, 8.768, 8.832, 8.896, 8.96 , 9.024, 9.088, 9.152,
       9.216, 9.28 , 9.344, 9.408, 9.472, 9.536, 9.6  , 9.664, 9.728,
       9.792, 9.856, 9.92 ])
In [431]:
kwargs = dict(
    #     title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
    #     subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
    xtitle="p (the order of the AR model)",
    xmargin=0.02,
    calc_xticks=True,
    #     yscale="log",
)
for method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
    [ld_biased, burg, ols],
    [("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
    zip(mean_std_psd_standard_lst, mean_std_psd_ihs_lst),
):

    fig = plot_ts(
        mean_std_standard,
        index_values=ps,
        name=f"{pacf_methods_names[method]} method on the standardized",
        color="darkblue",
        linestyle=linestyle,
        linewidth=linewidth,
        marker="o",
        height=550,
        **kwargs,
    )
    kwargs = dict(fig=fig)
    fig = plot_ts(
        mean_std_ihs,
        index_values=ps,
        name=f"{pacf_methods_names[method]} method on the IHS-normalized",
        color="tab:orange",
        linestyle=linestyle,
        linewidth=linewidth,
        marker="o",
        **kwargs,
    )
display(fig)

QQ-plot ACF and norms¶

In [300]:
# Q-Q plot: EEG data versus a normal distribution
n = 7000
begin = 1000
ts_idx = 5
# plotting positions (i - 0.5) / n keep the extreme theoretical quantiles finite
normal_samples = norm.ppf((np.arange(1, n + 1) - 0.5) / n)
normal_q = pd.Series(normal_samples, index=normal_samples)
standard_q = pd.Series(
    np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
    index=normal_samples,
)
ihs_q = pd.Series(
    np.sort(ihs_trans_ts[ts_idx][begin : begin + n]), index=normal_samples
)
fig = plot_ts(
    normal_q,
    color="black",
    xtitle="normal theoretical quantiles",
    ytitle="data quantiles",
    #     title="Quantile-Quantile Plot for EEG Data",
    #     subtitle="EEG data versus a normal distribution",
    width=500,
    height=500,
)
markersize = 5
plot_ts(
    standard_q,
    color="darkblue",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="standardized",
)
plot_ts(
    ihs_q,
    color="tab:orange",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="IHS-normalized",
)
display(fig)
In [32]:
# means_and_stds_of_1-norm_and_2-norm
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    #     tss = tss[0:2]
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    # per matrix size h: mean over the signals of the per-signal mean norm,
    # and the spread (std) across the signals of those per-signal means
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
    )


#     return (
#         pd.Series(np.around(np.mean(mean_norms, 0)[1:], 1), index=range(1, nlags + 1)),
#         pd.Series(np.around(np.mean(std_norm, 0), 1)[1:], index=range(1, nlags + 1)),
#     )

p = 2
kwargs = dict(
    xtitle="$h$ (matrix size)",
    ax_height=300,
    calc_xticks=True,
)
fig = None
for ts, ts_name, color, conf_alpha, inv in [
    (standard_trans_ts, "$||\hat{R}_h||_2$ for standardized", "darkblue", 0.1, False),
    (ihs_trans_ts, "$||\hat{R}_h||_2$ for IHS-normalized", "tab:orange", 0.25, False),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    #     std_bar = np.transpose(
    #         np.array([np.around(mean_norms - std_norms, 1), mean_norms + std_norms])
    #     )
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        conf_alpha=conf_alpha,
        yscale="log",
        **kwargs,
    )
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]
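Here cov_matrix(acov) is assumed to build the $h \times h$ Toeplitz autocorrelation matrix $\hat{R}_h = \big[\hat\rho(|i-j|)\big]_{i,j=1}^{h}$ from the normalized autocovariances (so $\hat\rho(0) = 1$), and the curves show the mean of its spectral norm $||\hat{R}_h||_2$ together with its spread across the signals as the matrix size $h$ grows; the next cell repeats the computation for the inverse matrices.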
In [33]:
# means_and_stds_of_1-norm_and_2-norm
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)

print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")


def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    #     tss = tss[0:2]
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    # per matrix size h: mean over the signals of the per-signal mean norm,
    # and the spread (std) across the signals of those per-signal means
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
    )


#     return (
#         pd.Series(np.around(np.mean(mean_norms, 0)[1:], 1), index=range(1, nlags + 1)),
#         pd.Series(np.around(np.mean(std_norm, 0), 1)[1:], index=range(1, nlags + 1)),
#     )

p = 2
kwargs = dict(
    xtitle="$h$ (matrix size)",
    ax_height=300,
    calc_xticks=True,
)
fig = None
for ts, ts_name, color, conf_alpha, inv in [
    (standard_trans_ts, "$||\hat{R}_h^{-1}||_2$ for standardized", "darkblue", 0.1, True),
    (ihs_trans_ts, "$||\hat{R}_h^{-1}||_2$ for IHS-normalized", "tab:orange", 0.25, True),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    #     std_bar = np.transpose(
    #         np.array([np.around(mean_norms - std_norms, 1), mean_norms + std_norms])
    #     )
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        conf_alpha=conf_alpha,
        yscale="log",
        **kwargs,
    )
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
Intervals: [[0, 1999], [750, 2749], [1500, 3499], [2250, 4249], [3000, 4999], [3750, 5749], [4500, 6499], [5250, 7249], [6000, 7999]]
In [314]:
# means_and_stds_autocorr
def means_and_stds_autocorr(
    begins, ends, title=None, subtitle=None, nlags=20, alpha=0.05
):
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    ts_len = ends[0] - begins[0]
    kwargs = dict(title=title, subtitle=subtitle, ax_height=300)
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standardized", "darkblue"),
        (ihs_trans_ts, "IHS-normalized", "tab:orange"),
    ]:
        means = np.zeros((len(ts), nlags + 1))
        stds = np.zeros((len(ts), nlags + 1))
        for ts_idx in range(len(ts)):
            autocorrs = np.zeros((len(begins), nlags + 1))
            for i, (begin, end) in enumerate(zip(begins, ends)):
                autocorrs[i, :] = acovf(ts[ts_idx][begin:end], fft=False)[: nlags + 1]
                autocorrs[i, :] /= autocorrs[i, 0]
            means[ts_idx, :] = np.mean(autocorrs, 0)
            stds[ts_idx, :] = np.std(autocorrs, 0)
        mean_mean = np.mean(means, 0)
        mean_std = np.mean(stds, 0)
        fig = plot_stats(
            mean_mean,
            std=mean_std,
            conf_alpha=0.1,
            xs=np.arange(0, nlags + 1),
            label=ts_name,
            color=color,
            markersize=7,
            showgrid=True,
            **kwargs,
        )
        kwargs = dict(fig=fig)

    conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
    plot_ts(
        pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        name=f"{100 * (1-alpha)}% confidence interval",
        fig=fig,
    )
    plot_ts(
        pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        fig=fig,
    )
    display(fig)


nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
means_and_stds_autocorr(begins, ends, title=None, subtitle=None, nlags=nlags)
Intervals: [[0, 2000], [750, 2750], [1500, 3500], [2250, 4250], [3000, 5000], [3750, 5750], [4500, 6500], [5250, 7250], [6000, 8000]]
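The dashed guide lines mark the usual white-noise band for a sample autocorrelation, $\pm z_{1-\alpha/2}/\sqrt{n}$ (computed via norm.ppf above); with $n = 2000$ and $\alpha = 0.05$ this is roughly $\pm 1.96/\sqrt{2000} \approx \pm 0.044$.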

END¶

In [3]:
%%html
<!--hide_me_please-->
<style>
.rendered_html {
    font-size: 110%;
}
</style>
In [4]:
%%html
<!--hide_me_please-->
<p class="showoutput_stderr" id="first123456aa_stderr"><a href="javascript:output_stderr_toggle('#first123456_stderr')">show stderr outputs in the notebook</a></p>
<p class="hideoutput_stderr" id="first123456_stderr"><a href="javascript:output_stderr_toggle('#first123456aa_stderr')">hide stderr outputs in the notebook</a></p>

<p class="showoutput_error" id="first123456aa_error"><a href="javascript:output_error_toggle('#first123456_error')">show errors in the notebook</a></p>
<p class="hideoutput_error" id="first123456_error"><a href="javascript:output_error_toggle('#first123456aa_error')">hide errors in the notebook</a></p>

<p class="showtemporary_output" id="first123456aa_temporary_output"><a href="javascript:temporary_output_toggle('#first123456_temporary_output')">show temporary outputs in the notebook</a></p>
<p class="hidetemporary_output" id="first123456_temporary_output"><a href="javascript:temporary_output_toggle('#first123456aa_temporary_output')">hide temporary outputs in the notebook</a></p>

<p class="showtemporary_input" id="first123456aa_temporary_input"><a href="javascript:temporary_input_toggle('#first123456_temporary_input')">show temporary inputs in the notebook</a></p>
<p class="hidetemporary_input" id="first123456_temporary_input"><a href="javascript:temporary_input_toggle('#first123456aa_temporary_input')">hide temporary inputs in the notebook</a></p>

<p class="showdev_codes" id="first123456aa_dev_codes"><a href="javascript:dev_codes_toggle('#first123456_dev_codes')">show dev codes in the notebook</a></p>
<p class="hidedev_codes" id="first123456_dev_codes"><a href="javascript:dev_codes_toggle('#first123456aa_dev_codes')">hide dev codes in the notebook</a></p>
In [5]:
%%html
<!--hide_me_please-->

<script>
// you can change the following default display settings
code_show=false; // ordinary code cells
dev_codes_show=false; // notebook javascript and html codes hidden
output_text_show=true; // stdout (excluding temporary outputs)
output_stderr_show=true; // stderr (excluding temporary outputs)
output_error_show=true; // python interpreter errors (excluding temporary outputs)
temporary_output_show=false; // temporary outputs (from temporary code cells)
temporary_input_show=false; // temporary code cells
output_report_view=true; // report view or work view

function code_toggle(target_id, change=true){
    if (change) {
        code_show=!code_show;
    }
    if(code_show) {
        $('.input').show();
        $('.code_cell').css('padding', '2px');
        $('.code_cell').css('border-width', '1px');
        $(".hidecode").show();
        $(".showcode").hide();
    } else {
        $('.input').hide();
        $('.code_cell').css('padding', '0px');
        $('.code_cell').css('border-width', '0px');
        $('.hidecode').hide();
        $('.showcode').show();
    }
    output_text_toggle('', false);
    output_stderr_toggle('', false);
    output_error_toggle('', false);
    temporary_output_toggle('', false);
    temporary_input_toggle('', false);
    dev_codes_toggle('', false)
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function output_text_toggle(target_id, change=true){
    if (change) {
        output_text_show=!output_text_show;
    }
    if(output_text_show) {
        $('.output_text').not(':contains("this_is_temporary_output")').show();
        $(".hideoutput_text").show();
        $(".showoutput_text").hide();
    } else {
        $('.output_text').not(':contains("this_is_temporary_output")').hide();
        $('.hideoutput_text').hide();
        $('.showoutput_text').show();
    }
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function dev_codes_toggle(target_id, change=true){
    if (change) {
        dev_codes_show=!dev_codes_show;
    }
    if(dev_codes_show) {
        $('.output_javascript:contains("hide_me_please")').show();
        $('.input:contains("hide_me_please")').show();
        $('.prompt:contains("hide_me_please")').show();
        $('.code_cell:contains("hide_me_please")').css('padding', '2px');
        $('.code_cell:contains("hide_me_please")').css('border-width', '1px');
        $(".hidedev_codes").show();
        $(".showdev_codes").hide();
    } else {
        $('.output_javascript:contains("hide_me_please")').hide();
        $('.input:contains("hide_me_please")').hide();
        $('.prompt:contains("hide_me_please")').hide();
        $('.code_cell:contains("hide_me_please")').css('padding', '0px');
        $('.code_cell:contains("hide_me_please")').css('border-width', '0px');
        $(".hidedev_codes").hide();
        $(".showdev_codes").show(); 
    }
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function temporary_output_toggle(target_id, change=true){
    if (change) {
        temporary_output_show=!temporary_output_show;
    }
    if(temporary_output_show) {
        $('.output_javascript:contains("this_is_temporary_output")').show();
        $('.output_text:contains("this_is_temporary_output")').each(function(i, el) {
            el.style.visibility = "visible";
            node = el.parentNode.parentNode.parentNode;
            node.style.display = "block";
            node.style.visibility = "visible";
        });
        $('.output_error').each(function(i, node) {
            parent = node.parentNode.parentNode.parentNode;
            console.log(`error {node} in {parent}`);
            console.log(parent.parentNode.innerText)
            if (parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("show");
                parent.style.display = "block";
                parent.style.visibility = "visible";
            }
        });
        $(".hidetemporary_output").show();
        $(".showtemporary_output").hide();
    } else {
        $('.output_javascript:contains("this_is_temporary_output")').hide();
        $('.output_text:contains("this_is_temporary_output")').each(function(i, el) {
            el.style.visibility = "hidden";
            node = el.parentNode.parentNode.parentNode;
            node.style.display = "none";
            node.style.visibility = "hidden";
        });
        $('.output_error').each(function(i, node) {
            parent = node.parentNode.parentNode.parentNode;
            console.log(`error {node} in {parent}`);
            if (parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("hide");
                parent.style.display = "none";
                parent.style.visibility = "hidden";
            }
        });
        $(".hidetemporary_output").hide();
        $(".showtemporary_output").show();
    }
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function temporary_input_toggle(target_id, change=true){
    if (change) {
        temporary_input_show=!temporary_input_show;
    }
    if(temporary_input_show) {
        $('.output_javascript:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').show();
        $('.input:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').show();
        $('.prompt:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').show();
        $('.code_cell:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').css('padding', '2px');
        $('.code_cell:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').css('border-width', '1px');
        $(".hidetemporary_input").show();
        $(".showtemporary_input").hide();
    } else {
        $('.output_javascript:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').hide();
        $('.input:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').hide();
        $('.prompt:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').hide();
        $('.code_cell:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').css('padding', '0px');
        $('.code_cell:contains("this_is_temporary_cell")').not(':contains("hide_me_please")').css('border-width', '0px');
        $(".hidetemporary_input").hide();
        $(".showtemporary_input").show(); 
    }
    output_text_toggle('', false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function output_stderr_toggle(target_id, change=true){
    if (change) {
        output_stderr_show=!output_stderr_show;
    }
    if(output_stderr_show) {
        $('.output_stderr').each(function(i, node) {
            parent = node.parentNode.parentNode.parentNode.parentNode;
            console.log(`stderr {node} in {parent}`);
            if (!parent.textContent.includes("this_is_temporary_output")) {
                console.log("show");
                node.parentNode.style.display = "";
                node.parentNode.style.visibility = "visible";
            }
        });
        $(".hideoutput_stderr").show();
        $(".showoutput_stderr").hide();
    } else {
        $('.output_stderr').each(function(i, node) {
            parent = node.parentNode.parentNode.parentNode.parentNode;
            console.log(`stderr {node} in {parent}`);
            if (!parent.textContent.includes("this_is_temporary_output")) {
                console.log("hide");
                node.parentNode.style.display = "none";
                node.parentNode.style.visibility = "hidden";
            }
        });
        $('.hideoutput_stderr').hide();
        $('.showoutput_stderr').show();
    }
    temporary_output_toggle('', false);
    output_text_toggle('', false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function output_error_toggle(target_id, change=true){
    if (change) {
        output_error_show=!output_error_show;
    }
    if(output_error_show) {
        $('.output_error').each(function(i, node) {
            parent = node.parentNode.parentNode.parentNode;
            console.log(`error {node} in {parent}`);
            console.log(parent.parentNode.innerText)
            if (!parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("show");
                parent.style.display = "block";
                parent.style.visibility = "visible";
            }
        });
        $(".hideoutput_error").show();
        $(".showoutput_error").hide();
    } else {
        $('.output_error').each(function(i, node) {
            parent = node.parentNode.parentNode.parentNode;
            console.log(`error {node} in {parent}`);
            if (!parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("hide");
                parent.style.display = "none";
                parent.style.visibility = "hidden";
            }
        });
        $('.hideoutput_error').hide();
        $('.showoutput_error').show();
    }
    temporary_input_toggle('', false);
    output_text_toggle('', false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function output_report_or_work_toggle(target_id, change=true) {
    if (change) {
        output_report_view=!output_report_view;
    }
    if(output_report_view) {
        code_show=false;
        code_toggle('', change=false);
        dev_codes_show=false;
        output_text_show=true;
        output_stderr_show=false;
        output_error_show=false;
        temporary_output_show=false;
        temporary_input_show=false;
        $('.prompt, .input, .prompt_container').hide();
        $('.out_prompt_overlay').hide();
        $('.code_cell').css('padding', '0px');
        $('.code_cell').css('border-width', '0px');
        $(".showoutput_work_view").show();
        $(".showoutput_report_view").hide();
    } else {
        code_show=true;
        code_toggle('', change=false);
        output_text_show=true;
        output_stderr_show=true;
        output_error_show=true;
        $('.input, .prompt, .prompt_container').show();
        $('.out_prompt_overlay').show();
        $('.code_cell').css('padding', '2px');
        $('.code_cell').css('border-width', '1px');
        $('.showoutput_work_view').hide();
        $('.showoutput_report_view').show();
    }
    dev_codes_toggle('', change=false);
    temporary_output_toggle('', change=false);
    temporary_input_toggle('', change=false);
    output_text_toggle('', change=false);
    output_stderr_toggle('', change=false);
    output_error_toggle('', change=false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        })
    }
}

function initialize(){
    code_toggle('', change=false);
    dev_codes_toggle('', change=false);
    temporary_output_toggle('', change=false);
    temporary_input_toggle('', change=false);
    output_text_toggle('', change=false);
    output_stderr_toggle('', change=false);
    output_error_toggle('', change=false);
    output_report_or_work_toggle('', change=false);
    $(".showhide, .showcode, .showdev_codes, .hidedev_codes, .showtemporary_output, .hidetemporary_output, .showtemporary_input, .hidetemporary_input, .hidecode, .showoutput_text, .hideoutput_text, .showoutput_stderr, .hideoutput_stderr, .showoutput_error, .hideoutput_error, .showoutput_work_view, .showoutput_report_view").css('font-style', 'italic');
    $(".showhide, .showcode, .showdev_codes, .hidedev_codes, .showtemporary_output, .hidetemporary_output, .showtemporary_input, .hidetemporary_input, .hidecode, .showoutput_text, .hideoutput_text, .showoutput_stderr, .hideoutput_stderr, .showoutput_error, .hideoutput_error, .showoutput_work_view, .showoutput_report_view").css('text-align', 'right');
    $(".showhide, .showcode, .showdev_codes, .hidedev_codes, .showtemporary_output, .hidetemporary_output, .showtemporary_input, .hidetemporary_input, .hidecode, .showoutput_text, .hideoutput_text, .showoutput_stderr, .hideoutput_stderr, .showoutput_error, .hideoutput_error, .showoutput_work_view, .showoutput_report_view").css('font-size', '80%');
}
$(document).ready(initialize);
</script>