%%html
<!--hide_me_please-->
<p class="showoutput_report_view" id="first123456aa_article_view"><a href="javascript:output_report_or_work_toggle('#first123456_work_view')">show the report view</a></p>
<p class="showoutput_work_view" id="first123456_work_view"><a href="javascript:output_report_or_work_toggle('#first123456aa_article_view')">show the work view</a></p>
<p class="showcode" id="first123456aa"><a href="javascript:code_toggle('#first123456')">show codes in the notebook</a></p>
<p class="hidecode" id="first123456"><a href="javascript:code_toggle('#first123456aa')">hide codes in the notebook</a></p>
<p class="showoutput_text" id="first123456aa_text"><a href="javascript:output_text_toggle('#first123456_text')">show text outputs in the notebook</a></p>
<p class="hideoutput_text" id="first123456_text"><a href="javascript:output_text_toggle('#first123456aa_text')">hide text outputs in the notebook</a></p>
# hide_me_please
from IPython.display import HTML, display


def this_is_temporary_cell():
    """Mark the current cell's output as temporary (hidden in the report view)."""
    cell_output_is_temporary()


def cell_output_is_temporary():
    """Emit the markers the notebook's toggle JS looks for in this cell's output.

    BUG FIX: the original built the HTML object without displaying it, so the
    "<!--this_is_temporary_output-->" marker never actually appeared in the
    output; wrap it in display().
    """
    display(HTML("<!--this_is_temporary_output-->"))
    print("# this_is_temporary_output")
# hide_me_please
from plotly.offline import init_notebook_mode
init_notebook_mode()
%%html
<!--hide_me_please-->
<p class="hidecode" id="sdgerewere"><a href="javascript:code_toggle('#sdgerewere')">hide codes in the notebook</a></p>
# %reset out
# import dill
# dill.dump_session("notebook_env.db")
# import dill
# dill.load_session('notebook_env.db')
# Run the following installation:
# !pip install ../timeseries/
# !pip install git+https://github.com/krzpiesiewicz/timeseries
# !pip uninstall timeseries
%%html
<!--hide_me_please-->
<p class="hidecode" id="23thgf45trgdg"><a href="javascript:code_toggle('#23thgf45trgdg')">hide codes in the notebook</a></p>
# imports
import os
import sys
import time
import IPython.display
from IPython.display import HTML
from pprint import pprint
import matplotlib.colors as mcolors
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import scipy.linalg
from scipy.stats import norm
from statsmodels.tsa.stattools import acovf
from statsmodels.tsa.stattools import levinson_durbin_pacf as ar_from_pacf
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.tools import Bunch, add_constant
from statsmodels.compat.numpy import lstsq
from statsmodels.regression.linear_model import OLS
import timeseries as tss
from timeseries import plot_ts
from timeseries.analysis import acf, pacf, plot_hist, plot_acf, plot_pacf, plot_stats
from timeseries.transform import IHSTransformer, get_smoothed
from timeseries.plotting import fig_with_vertical_subplots, ax_settings
from timeseries.forecast.scorings import get_scoring
%%html
<!--hide_me_please-->
<p class="hidecode" id="344td34gf3tgf3"><a href="javascript:code_toggle('#344td34gf3tgf3')">hide codes in the notebook</a></p>
# import warnings
import matplotlib.pyplot as plt
import warnings
# Silence all Python warnings for the rest of the notebook, disable
# matplotlib's "too many open figures" warning and make legends opaque.
warnings.filterwarnings("ignore")
plt.rcParams.update(
{
"figure.max_open_warning": 0,
"legend.framealpha": 1
}
)
%%html
<!--hide_me_please-->
<p class="hidecode" id="23thgf45trgdg123454"><a href="javascript:code_toggle('#23thgf45trgdg123454')">hide codes in the notebook</a></p>
# load extensions
%load_ext nb_black
%load_ext autoreload
%autoreload 2
%aimport timeseries
# load data
# One pandas Series per CSV column; the last column is dropped
# (presumably a non-signal column, e.g. a label/time index -- TODO confirm
# against the CSV header).
eeg_data = pd.read_csv("data/eeg_features_raw.csv")
eeg_ts = [eeg_data[col] for col in eeg_data][:-1]
%%html
<!--hide_me_please-->
<p class="showcode" id="34rfdertgfaa"><a href="javascript:code_toggle('#34rfdertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="34rfdertgf"><a href="javascript:code_toggle('#34rfdertgfaa')">hide codes in the notebook</a></p>
# defining an interval for fitting normalization
ts = eeg_ts[0]
# Samples [2, 1000) are used only to fit the normalizers; the rest is
# treated as the validation interval.
transformation_train_intv = tss.Interval(ts, 2, 1000)
val_intv = tss.Interval(ts, transformation_train_intv.end)
fig = plot_ts(
transformation_train_intv.view(ts),
color="orange",
name="transformation train interval",
ax_height=300,
)
plot_ts(val_intv.view(), color="tab:blue", subtitle="", fig=fig)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="145ygfsdfghjtraa"><a href="javascript:code_toggle('#145ygfsdfghjtr')">show codes in the notebook</a></p>
<p class="hidecode" id="145ygfsdfghjtr"><a href="javascript:code_toggle('#145ygfsdfghjtraa')">hide codes in the notebook</a></p>
# standard normalization
# lmb=None makes IHSTransformer act as a plain standardizer (no IHS step)
# -- TODO confirm against the timeseries.transform docs.
standard_trans = [
IHSTransformer(transformation_train_intv.view(ts), lmb=None) for ts in eeg_ts
]
standard_trans_ts = [trans.transform(ts) for ts, trans in zip(eeg_ts, standard_trans)]
# IHS normalization
# One transformer per series, each fitted on the same training interval.
ihs_trans = [
IHSTransformer(transformation_train_intv.view(ts), verbose=True) for ts in eeg_ts
]
ihs_trans_ts = [trans.transform(ts) for ts, trans in zip(eeg_ts, ihs_trans)]
# Compare standardized vs IHS-normalized versions of one segment of series 0;
# samples whose standardized magnitude exceeds 2 are highlighted with markers
# to show how IHS shrinks the extreme values.
fig = plot_ts(
standard_trans_ts[0][5525:5720],
color="darkblue",
marker="",
linewidth=1.2,
ax_height=300,
)
# Boolean mask of the "extreme" points (|standardized value| > 2).
mask = np.abs(standard_trans_ts[0][5525:5720]) > 2
plot_ts(
standard_trans_ts[0][5525:5720][mask],
marker="o",
# alpha=0.5,
markersize=7,
linestyle="",
color="darkblue",
name="standardized",
fig=fig,
)
plot_ts(ihs_trans_ts[0][5525:5720], color="orange", marker="", linewidth=1.2, fig=fig)
plot_ts(
ihs_trans_ts[0][5525:5720][mask],
marker="o",
markersize=7,
# alpha=0.8,
linestyle="",
color="orange",
name="IHS-normalized",
fig=fig,
subtitle="",
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="45ygfsdfghjtraa"><a href="javascript:code_toggle('#45ygfsdfghjtr')">show codes in the notebook</a></p>
<p class="hidecode" id="45ygfsdfghjtr"><a href="javascript:code_toggle('#45ygfsdfghjtraa')">hide codes in the notebook</a></p>
# Q-Q plot: EEG data versus a normal distribution
n = 7000
begin = 1000
ts_idx = 5
# BUG FIX: np.linspace(0.0, 1.0, n) includes the probabilities 0 and 1, for
# which norm.ppf returns -inf/+inf, putting infinite endpoints on the
# reference line and in the series index.  Use the standard mid-point
# plotting positions (i - 0.5) / n instead, so every theoretical quantile is
# finite.  norm.ppf is vectorized, so np.apply_along_axis is unnecessary.
normal_samples = norm.ppf((np.arange(1, n + 1) - 0.5) / n)
# Reference line y = x over the theoretical quantiles.
normal_q = pd.Series(normal_samples, index=normal_samples)
# Empirical quantiles: sorted sample values paired with theoretical quantiles.
standard_q = pd.Series(
    np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
    index=normal_samples,
)
ihs_q = pd.Series(
    np.sort(ihs_trans_ts[ts_idx][begin : begin + n]), index=normal_samples
)
fig = plot_ts(
    normal_q,
    color="black",
    xtitle="normal theoretical quantiles",
    ytitle="data quantiles",
    title="Quantile-Quantile Plot for EEG Data",
    subtitle="EEG data versus a normal distribution",
    width=1000,
    height=1000,
)
markersize = 5
plot_ts(
    standard_q,
    color="tab:blue",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="standard",
)
plot_ts(
    ihs_q,
    color="tab:red",
    marker="o",
    markersize=markersize,
    mfc="none",
    linestyle="None",
    alpha=0.5,
    fig=fig,
    name="IHS",
)
display(fig)
# mean Q-Q plot: EEG data versus a normal distribution
# Average of per-window empirical quantiles over overlapping windows of one
# series, plotted against normal theoretical quantiles.
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
ts_idx = 5
n = len(begins)
# NOTE(review): linspace(0.0, 1.0, ts_len) makes norm.ppf return -inf/+inf at
# the endpoints; mid-point plotting positions (i - 0.5)/ts_len would avoid the
# infinite quantiles in the index and the reference line.
normal_samples = np.apply_along_axis(norm.ppf, 0, np.linspace(0.0, 1.0, ts_len))
normal_q = pd.Series(normal_samples, index=normal_samples)
fig = plot_ts(
normal_q,
color="black",
xtitle="normal theoretical quantiles",
ytitle="data quantiles",
title="Mean Quantile-Quantile Plot for EEG Data",
subtitle="EEG data versus a normal distribution",
width=1000,
height=1000,
)
markersize = 5
# Accumulators for the per-window quantiles; divided by n below.
mean_standard_q = pd.Series(
np.zeros(ts_len),
index=normal_samples,
)
mean_ihs_q = pd.Series(np.zeros(ts_len), index=normal_samples)
for begin, end in zip(begins, ends):
standard_q = pd.Series(
np.sort(standard_trans_ts[ts_idx][begin:end]),
index=normal_samples,
)
ihs_q = pd.Series(np.sort(ihs_trans_ts[ts_idx][begin:end]), index=normal_samples)
mean_standard_q += standard_q
mean_ihs_q += ihs_q
mean_standard_q /= n
mean_ihs_q /= n
plot_ts(
mean_standard_q,
color="tab:blue",
marker="o",
markersize=markersize,
mfc="none",
linestyle="None",
alpha=0.5,
fig=fig,
name="standard",
)
plot_ts(
mean_ihs_q,
color="tab:red",
marker="o",
markersize=markersize,
mfc="none",
linestyle="None",
alpha=0.5,
fig=fig,
name="IHS",
)
display(fig)
# Quick notebook-style inspection of the number of EEG series.
len(eeg_ts)
# mean of means Q-Q plot: EEG data versus a normal distribution
# Same as the previous cell, but additionally averaged over all series.
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
n = len(begins)
# NOTE(review): linspace(0.0, 1.0, ts_len) makes norm.ppf return -inf/+inf at
# the endpoints; mid-point plotting positions would keep all quantiles finite.
normal_samples = np.apply_along_axis(norm.ppf, 0, np.linspace(0.0, 1.0, ts_len))
normal_q = pd.Series(normal_samples, index=normal_samples)
fig = plot_ts(
normal_q,
color="black",
xtitle="normal theoretical quantiles",
ytitle="data quantiles",
title="Mean of Means Quantile-Quantile Plot for EEG Data",
subtitle="EEG data versus a normal distribution",
width=1000,
height=1000,
)
markersize = 5
mean_of_means_standard_q = pd.Series(
np.zeros(ts_len),
index=normal_samples,
)
mean_of_means_ihs_q = pd.Series(np.zeros(ts_len), index=normal_samples)
m = len(eeg_ts)
# Outer loop: series; inner loop: windows within the series.
for ts_idx in range(m):
mean_standard_q = pd.Series(
np.zeros(ts_len),
index=normal_samples,
)
mean_ihs_q = pd.Series(np.zeros(ts_len), index=normal_samples)
for begin, end in zip(begins, ends):
standard_q = pd.Series(
np.sort(standard_trans_ts[ts_idx][begin:end]),
index=normal_samples,
)
ihs_q = pd.Series(
np.sort(ihs_trans_ts[ts_idx][begin:end]), index=normal_samples
)
mean_standard_q += standard_q
mean_ihs_q += ihs_q
mean_standard_q /= n
mean_ihs_q /= n
mean_of_means_standard_q += mean_standard_q
mean_of_means_ihs_q += mean_ihs_q
mean_of_means_standard_q /= m
mean_of_means_ihs_q /= m
plot_ts(
mean_of_means_standard_q,
color="tab:blue",
marker="o",
markersize=markersize,
mfc="none",
linestyle="None",
alpha=0.5,
fig=fig,
name="standard",
)
plot_ts(
mean_of_means_ihs_q,
color="tab:red",
marker="o",
markersize=markersize,
mfc="none",
linestyle="None",
alpha=0.5,
fig=fig,
name="IHS",
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="123asdfery735aa"><a href="javascript:code_toggle('#123asdfery735')">show codes in the notebook</a></p>
<p class="hidecode" id="123asdfery735"><a href="javascript:code_toggle('#123asdfery735aa')">hide codes in the notebook</a></p>
# Q-Q plot: EEG data versus a normal distribution (interactive plotly version)
n = 7000
begin = 1000
ts_idx = 5
# BUG FIX: norm.ppf(np.linspace(0.0, 1.0, n)) yields -inf/+inf at the
# endpoints; use mid-point plotting positions (i - 0.5)/n so all theoretical
# quantiles are finite.  norm.ppf is vectorized, no apply_along_axis needed.
normal_samples = norm.ppf((np.arange(1, n + 1) - 0.5) / n)
normal_q = pd.Series(normal_samples, index=normal_samples)
standard_q = pd.Series(
    np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
    index=normal_samples,
)
ihs_q = pd.Series(
    np.sort(ihs_trans_ts[ts_idx][begin : begin + n]), index=normal_samples
)
fig = plot_ts(
    normal_q,
    color="black",
    engine="plotly",
    name="theoretical normal",
    xaxis_title="normal theoretical quantiles",
    yaxis_title="data quantiles",
    title="Quantile-Quantile Plot for EEG Data",
    subtitle="EEG data versus a normal distribution",
    width=1000,
    height=1000,
)
markersize = 5
plot_ts(
    standard_q,
    color="tab:blue",
    fig=fig,
    name="standard",
)
plot_ts(
    ihs_q,
    color="tab:red",
    fig=fig,
    name="IHS",
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="asdfery735aa"><a href="javascript:code_toggle('#asdfery735')">show codes in the notebook</a></p>
<p class="hidecode" id="asdfery735"><a href="javascript:code_toggle('#asdfery735aa')">hide codes in the notebook</a></p>
# Q-Q plot: IHS-transformed data versus standard normalized data
# (re-imports below are redundant but harmless in a notebook)
import matplotlib.pyplot as plt
from scipy.stats import norm
n = 1000
begin = 6000
# NOTE(review): uses the global `ts_idx` left over from an earlier cell --
# confirm it is set before running this cell.
# Empirical quantiles of the IHS series indexed by the corresponding
# quantiles of the standardized series.
qs = pd.Series(
np.sort(ihs_trans_ts[ts_idx][begin : begin + n]),
index=np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
)
# Reference line y = x over the IHS quantiles.
fig = plot_ts(
pd.Series(
np.sort(ihs_trans_ts[ts_idx][begin : begin + n]),
index=np.sort(ihs_trans_ts[ts_idx][begin : begin + n]),
),
color="black",
xtitle="standard quantiles",
ytitle="IHS quantiles",
title="Quantile-Quantile Plot for EEG Data",
subtitle="IHS-transformerd data versus standard normalized data",
width=1000,
height=1000,
)
markersize = 5
plot_ts(
qs,
color="tab:green",
marker="o",
markersize=markersize,
mfc="none",
linestyle="None",
alpha=0.5,
fig=fig,
)
fig
%%html
<!--hide_me_please-->
<p class="showcode" id="765432asdfrewaa"><a href="javascript:code_toggle('#765432asdfrew')">show codes in the notebook</a></p>
<p class="hidecode" id="765432asdfrew"><a href="javascript:code_toggle('#765432asdfrewaa')">hide codes in the notebook</a></p>
# pacf methods definitions and descriptions
# Method identifiers as understood by the pacf() implementation used below.
ols = "ols"
burg = "burg"
ld_biased = "ld_biased"
# Short display names used in plot legends/subtitles.
pacf_methods_names = {
ols: "the OLS",
burg: "Burg's",
ld_biased: "the Durbin-Levinson",
}
# Longer descriptions used in figure subtitles.
methods_descrs = {
ols: "the ordinary least squares method with an intercept",
burg: "Burg's algorithm",
ld_biased: "the Durbin-Levinson recursion without bias correction",
}
# def plot_diverse_intervals_experiments
def plot_pacf_for_intervals(
    ts, ts_type, begins, ends, ps, pacf_method, alpha=0.05, nlags=None, **kwargs
):
    """Overlay PACF plots for the slices ts[begin:end] given by begins/ends.

    Each interval's color is a shade of green scaled by its p in *ps*
    (later intervals are darker).  *ts_type* is accepted for interface
    compatibility but not used here.  Returns the figure produced by the
    last plot_pacf call, if any.
    """
    # BUG FIX: 'fig' was never assigned in this function; the trailing
    # `if fig is not None` read a leftover global (or raised NameError).
    # Capture the value returned by plot_pacf instead -- the notebook's other
    # plotting helpers (plot_ts, plot_stats) return a figure the same way;
    # TODO confirm plot_pacf does too.
    fig = None
    for begin, end, p in zip(begins, ends, ps):
        color = (0, 1 - p * 0.5, 0)
        fig = plot_pacf(
            ts[begin:end],
            alpha=alpha,
            nlags=nlags,
            method=pacf_method,
            label=f"[{begin},{end-1}]",
            color=color,
            conf_alpha=0.02,
            **kwargs,
        )
    if fig is not None:
        return fig
def plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    intervals_type,
    pacf_methods,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    """Draw one figure per PACF method, with a vertical subplot per series.

    ts_list is a sequence of (series, description) pairs.  When title or
    subtitle is given explicitly it is kept as-is; otherwise they are
    generated per method / per series.  Extra kwargs are forwarded to
    plot_pacf_for_intervals.
    """
    fixed_title = title is not None
    fixed_subtitle = subtitle is not None
    for pacf_method in pacf_methods:
        method_descr = methods_descrs[pacf_method]
        if not fixed_title:
            title = f"{pacf_method.upper()} PACF Values Computed on {intervals_type}"
        # One subplot per (series, description) pair.
        fig, axs = fig_with_vertical_subplots(
            len(ts_list),
            title=title,
            subplots_titles=True,
            ax_height=450,
            fontsize=15,
        )
        for (series, ts_type), ax in zip(ts_list, axs):
            if not fixed_subtitle:
                subtitle = f"{method_descr} for {ts_type}"
            plot_pacf_for_intervals(
                series,
                ts_type,
                begins,
                ends,
                ps,
                pacf_method,
                ax=ax,
                subtitle=subtitle,
                **kwargs,
            )
        display(fig)
%%html
<!--hide_me_please-->
<p id="rtr4554rf11aa"></p>
<p class="hidecode" id="rtr4554rf11"><a href="javascript:code_toggle('#rtr4554rf11aa')">hide codes in the notebook</a></p>
# ld-biased pacf experiments on highly overlapping intervals
# 10 windows of length ts_len, each shifted by one sample, so the PACFs
# should be nearly identical; differences reveal the method's sensitivity.
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
# p in [0, 1] controls the shade of green used per interval.
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
# NOTE: nlags set here is reused as a global by several later cells.
nlags = 60
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ld_biased],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="7654nbvcdfguyaa"><a href="javascript:code_toggle('#7654nbvcdfguy')">show codes in the notebook</a></p>
<p class="hidecode" id="7654nbvcdfguy"><a href="javascript:code_toggle('#7654nbvcdfguyaa')">hide codes in the notebook</a></p>
# OLS pacf experiments on highly overlapping intervals
# Same setup as the ld-biased cell above, with the OLS method.
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
# NOTE(review): relies on the global `nlags` set in an earlier cell.
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ols],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="jhgfuytr7654121212aa"><a href="javascript:code_toggle('#jhgfuytr7654121212')">show codes in the notebook</a></p>
<p class="hidecode" id="jhgfuytr7654121212"><a href="javascript:code_toggle('#jhgfuytr7654121212aa')">hide codes in the notebook</a></p>
# burg's pacf experiments on highly overlapping intervals
# Same setup as the previous two cells, with Burg's method.
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[burg],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="dtrjertthh765aa"><a href="javascript:code_toggle('#dtrjertthh765')">show codes in the notebook</a></p>
<p class="hidecode" id="dtrjertthh765"><a href="javascript:code_toggle('#dtrjertthh765aa')">hide codes in the notebook</a></p>
# ld-biased pacf experiments
# Now on moderately overlapping windows: length ts_len, stride 750.
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ld_biased],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="34567uhgfvbyuyutyaa"><a href="javascript:code_toggle('#34567uhgfvbyuyuty')">show codes in the notebook</a></p>
<p class="hidecode" id="34567uhgfvbyuyuty"><a href="javascript:code_toggle('#34567uhgfvbyuyutyaa')">hide codes in the notebook</a></p>
# burg's pacf experiments
# Same stride-750 windows as the previous cell, with Burg's method.
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[burg],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="asfnitohaefw434312erfd345tfdaa"><a href="javascript:code_toggle('#asfnitohaefw434312erfd345tfd')">show codes in the notebook</a></p>
<p class="hidecode" id="asfnitohaefw434312erfd345tfd"><a href="javascript:code_toggle('#asfnitohaefw434312erfd345tfdaa')">hide codes in the notebook</a></p>
# OLS pacf experiments
# (the original comment said "burg's", but the method passed below is OLS)
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ols],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="fnitohaefw434312erfd345tfdaa"><a href="javascript:code_toggle('#fnitohaefw434312erfd345tfd')">show codes in the notebook</a></p>
<p class="hidecode" id="fnitohaefw434312erfd345tfd"><a href="javascript:code_toggle('#fnitohaefw434312erfd345tfdaa')">hide codes in the notebook</a></p>
# def pacf_means_and_stds
def pacf_means_and_stds(ts, begins, ends, pacf_method, alpha=None, nlags=20):
    """Compute the PACF on every slice ts[begin:end] and aggregate.

    Returns (mu, std, conf_intvs): per-lag mean and std over the intervals,
    and the confidence intervals of the LAST interval's PACF (None when
    alpha is None).
    """
    n = len(begins)
    assert len(ends) == n
    pacf_values = np.zeros((n, nlags + 1), dtype=float)
    # BUG FIX: conf_intvs was previously unbound (UnboundLocalError) when
    # begins/ends were empty; define it before the loop.
    conf_intvs = None
    for i, (begin, end) in enumerate(zip(begins, ends)):
        res = pacf(ts[begin:end], method=pacf_method, alpha=alpha, nlags=nlags)
        # Without alpha, pacf returns only the values -- normalize the shape.
        if alpha is None:
            res = (res, None)
        values, conf_intvs = res
        pacf_values[i, :] = values
    mu = np.mean(pacf_values, 0)
    std = np.std(pacf_values, 0)
    return mu, std, conf_intvs
%%html
<!--hide_me_please-->
<p class="showcode" id="utgjtygfrewsaa"><a href="javascript:code_toggle('#utgjtygfrews')">show codes in the notebook</a></p>
<p class="hidecode" id="utgjtygfrews"><a href="javascript:code_toggle('#utgjtygfrewsaa')">hide codes in the notebook</a></p>
# Mean PACF With Standard Deviations at Lags
# For each method, plot the per-lag mean +/- std of the PACF over stride-750
# windows, for both normalizations, plus the theoretical confidence band.
for pacf_method in [ld_biased, ols]:
ts_idx = 5
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
alpha = 0.05
# Half-width of the lag-1 confidence interval from a single window,
# used as the reference band for all lags.
conf_value = np.diff(
pacf(standard_trans_ts[ts_idx][:ts_len], method=pacf_method, alpha=alpha)[1][1]
)
mu_standard, std_standard, _ = pacf_means_and_stds(
standard_trans_ts[ts_idx], begins, ends, pacf_method=pacf_method, nlags=nlags
)
# Lower/upper band: mean -/+ one std, per lag.
std_bar_standard = np.transpose(
np.array([mu_standard - std_standard, mu_standard + std_standard])
)
mu_ihs, std_ihs, _ = pacf_means_and_stds(
ihs_trans_ts[ts_idx], begins, ends, pacf_method=pacf_method, nlags=nlags
)
std_bar_ihs = np.transpose(np.array([mu_ihs - std_ihs, mu_ihs + std_ihs]))
fig = plot_stats(
mu_standard,
std_bar_standard,
label="standard",
title="Mean PACF With Standard Deviations at Lags",
subtitle=f"{pacf_method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
showgrid=True,
)
plot_stats(mu_ihs, std_bar_ihs, color="tab:red", label="IHS", fig=fig)
plot_ts(
pd.Series([float(conf_value)] * (nlags), index=np.arange(1, nlags + 1)),
linestyle="--",
color="gray",
name=f"{100 * (1-alpha)}% confidence interval",
fig=fig,
)
plot_ts(
pd.Series([-float(conf_value)] * (nlags), index=np.arange(1, nlags + 1)),
linestyle="--",
color="gray",
fig=fig,
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="dfghjkluiytdddyhgtfrdeaa"><a href="javascript:code_toggle('#dfghjkluiytdddyhgtfrde')">show codes in the notebook</a></p>
<p class="hidecode" id="dfghjkluiytdddyhgtfrde"><a href="javascript:code_toggle('#dfghjkluiytdddyhgtfrdeaa')">hide codes in the notebook</a></p>
# mean std of nth lag of pacf
# Window layout shared (as globals) by the mean_std helper defined below.
nlags = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_std(tss, pacf_method):
    """Per-lag std of the PACF, averaged over the series in *tss*.

    Uses the module-level `begins`, `ends` and `nlags` from this cell.
    (Note: the parameter name `tss` shadows the `timeseries` module alias.)
    """
    total = np.zeros(nlags + 1)
    for series in tss:
        _, std, _ = pacf_means_and_stds(
            series, begins, ends, pacf_method=pacf_method, nlags=nlags
        )
        total = total + std
    return total / len(tss)
# Plot the mean per-lag PACF std for each method, solid = standardized,
# dashed = IHS; all curves share one figure via the kwargs/fig trick.
kwargs = {}
for pacf_method, color in zip(
[ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]
):
mean_std_standard = mean_std(standard_trans_ts, pacf_method=pacf_method)
mean_std_ihs = mean_std(ihs_trans_ts, pacf_method=pacf_method)
fig = plot_ts(
mean_std_standard,
calc_xticks=True,
major_xstep=1,
color=color,
linestyle="-",
marker="o",
name=f"{pacf_methods_names[pacf_method]} method, standardized",
title="Mean Standard Deviation of the n-th lag of the PACF",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} PACFs.\nEach of {len(begins)} PACFs are computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="lag",
height=800,
**kwargs,
)
# After the first plot, reuse the same figure for subsequent curves.
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
color=color,
linestyle="--",
marker="o",
name=f"{pacf_methods_names[pacf_method]} method, IHS",
**kwargs,
)
# for alpha in [0.05, 0.1]:
# conf_value = np.diff(
# pacf(standard_trans_ts[5][:ts_len], method=ld_biased, alpha=alpha)[1][1]
# )
# plot_ts(
# pd.Series([float(conf_value)] * 2, index=[1, nlags]),
# linestyle="--",
# color="black",
# alpha=1 - alpha,
# name=f"{100 * np.power(1 - alpha, 2)}% confidence interval",
# fig=fig,
# )
display(fig)
# mean std of nth lag of pacf
# Same window layout as before, but with 100 lags; results are cached in
# lists below so the (slow) computation is not repeated by the plot cells.
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_std(tss, pacf_method):
    """Average, over all series in *tss*, of the per-lag PACF std computed
    on the module-level begins/ends windows with the module-level nlags."""
    per_series_stds = [
        pacf_means_and_stds(
            series, begins, ends, pacf_method=pacf_method, nlags=nlags
        )[1]
        for series in tss
    ]
    return sum(per_series_stds, np.zeros(nlags + 1)) / len(tss)
mean_std_standard_lst = []
mean_std_ihs_lst = []
for pacf_method, color in zip(
[ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]
):
mean_std_standard_lst.append(mean_std(standard_trans_ts, pacf_method=pacf_method))
mean_std_ihs_lst.append(mean_std(ihs_trans_ts, pacf_method=pacf_method))
mean_std_standard_lst
%%html
<!--hide_me_please-->
<p class="showcode" id="111dfghjkluiytdddyhgtfrdeaa"><a href="javascript:code_toggle('#111dfghjkluiytdddyhgtfrde')">show codes in the notebook</a></p>
<p class="hidecode" id="111dfghjkluiytdddyhgtfrde"><a href="javascript:code_toggle('#111dfghjkluiytdddyhgtfrdeaa')">hide codes in the notebook</a></p>
# mean std of nth lag of pacf
# Re-states the same window layout; the actual curves come from the cached
# mean_std_standard_lst / mean_std_ihs_lst computed above.
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
def mean_std(tss, pacf_method):
    """Mean over the series in *tss* of the per-lag PACF standard deviation
    (windows and lag count come from the surrounding cell's globals)."""
    acc = np.zeros(nlags + 1)
    for one_series in tss:
        std = pacf_means_and_stds(
            one_series, begins, ends, pacf_method=pacf_method, nlags=nlags
        )[1]
        acc += std
    acc /= len(tss)
    return acc
# Plot smoothed versions of the cached mean-std curves, one color per method.
kwargs = {}
for pacf_method, color, (mean_std_standard, mean_std_ihs) in zip(
[ld_biased, burg, ols],
["tab:blue", "darkred", "tab:orange"],
zip(mean_std_standard_lst, mean_std_ihs_lst),
):
fig = plot_ts(
get_smoothed(pd.Series(mean_std_standard), std=2, only_prevs=False),
color=color,
linestyle="-",
linewidth=3,
alpha=0.5,
# marker="o",
name=f"{pacf_method} method on standard",
title="Mean Standard Deviation of the n-th lag of the PACF",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consiting of {len(begins)} PACFs.\nEach of {len(begins)} PACFs are computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="lag",
height=800,
**kwargs,
)
# Subsequent curves are drawn into the same figure.
kwargs = dict(fig=fig)
fig = plot_ts(
get_smoothed(pd.Series(mean_std_ihs), std=2, only_prevs=False),
color=color,
linestyle="--",
linewidth=3,
alpha=0.5,
# marker="o",
name=f"{pacf_method} method on IHS",
**kwargs,
)
display(fig)
# mean std of nth lag of pacf
# Window layout for the final, linestyle-coded variant of the plot below.
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
# NOTE: re-defined here for symmetry with the earlier cells, but this cell's
# plot actually reads the cached mean_std_*_lst values instead of calling it.
def mean_std(tss, pacf_method):
    """Per-lag PACF std averaged across the series in *tss*
    (intervals and nlags taken from this cell's globals)."""
    summed = np.zeros(nlags + 1)
    for s in tss:
        summed += pacf_means_and_stds(
            s, begins, ends, pacf_method=pacf_method, nlags=nlags
        )[1]
    return summed / len(tss)
# Final variant: color encodes the normalization (darkblue = standardized,
# orange = IHS) and linestyle/linewidth encode the PACF method.
kwargs = dict(
title="Mean Standard Deviation of the n-th lag of the PACF",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consiting of {len(begins)} PACFs.\nEach of {len(begins)} PACFs are computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="lag",
height=500,
)
for pacf_method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
[ld_biased, burg, ols],
[("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
zip(mean_std_standard_lst, mean_std_ihs_lst),
):
fig = plot_ts(
get_smoothed(pd.Series(mean_std_standard), std=2, only_prevs=False),
color="darkblue",
linestyle=linestyle,
linewidth=linewidth,
# alpha=0.5,
# marker="o",
name=f"{pacf_methods_names[pacf_method]} method for standardized",
**kwargs,
)
# After the first curve, draw everything else into the same figure.
kwargs = dict(fig=fig)
fig = plot_ts(
get_smoothed(pd.Series(mean_std_ihs), std=2, only_prevs=False),
color="tab:orange",
linestyle=linestyle,
linewidth=linewidth,
# alpha=0.5,
# marker="o",
name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
**kwargs,
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="566grtrtrgr665jkjaa"><a href="javascript:code_toggle('#566grtrtrgr665jkj')">show codes in the notebook</a></p>
<p class="hidecode" id="566grtrtrgr665jkj"><a href="javascript:code_toggle('#566grtrtrgr665jkjaa')">hide codes in the notebook</a></p>
# mean_autocorr
# Plots the per-lag mean +/- std of the lag-0-normalized autocovariances
# (i.e. autocorrelations) over the given intervals, for both normalizations,
# plus the theoretical confidence band.
# NOTE(review): depends on the globals `ts_idx` and `alpha` set by other
# cells -- confirm they are defined before calling.
def mean_autocorr(begins, ends, title=None, subtitle=None, nlags=20):
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
ts_len = ends[0] - begins[0]
if title is None:
title = "Mean Autocovariance"
kwargs = dict(title=title, subtitle=subtitle, showgrid=True)
fig = None
# `ts` here is the full LIST of transformed series; ts[ts_idx] selects one.
for ts, ts_name, color in [
(standard_trans_ts, "standard", "tab:blue"),
(ihs_trans_ts, "IHS", "tab:red"),
]:
autocorrs = np.zeros((len(begins), nlags + 1))
for i, (begin, end) in enumerate(zip(begins, ends)):
autocorrs[i, :] = acovf(ts[ts_idx][begin:end], fft=False)[: nlags + 1]
# Normalize by lag-0 autocovariance -> autocorrelation.
autocorrs[i, :] /= autocorrs[i, 0]
mu = np.mean(autocorrs, 0)
std = np.std(autocorrs, 0)
fig = plot_stats(
mu,
std=std,
xs=np.arange(0, nlags + 1),
label=ts_name,
color=color,
**kwargs,
)
kwargs = dict(fig=fig)
# Large-sample confidence band for a white-noise autocorrelation.
conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
plot_ts(
pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
linestyle="--",
color="gray",
name=f"{100 * (1-alpha)}% confidence interval",
fig=fig,
)
plot_ts(
pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
linestyle="--",
color="gray",
fig=fig,
)
display(fig)
%%html
<!--hide_me_please-->
<p id="23ragflgothrt39raa"></p>
<p class="hidecode" id="23ragflgothrt39r"><a href="javascript:code_toggle('#23ragflgothrt39raa')">hide codes in the notebook</a></p>
# Mean of the Autocorrelations of Highly Overlapping Intervals
nlags = 60
alpha = 0.05
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
# 20 windows shifted by one sample each -> nearly identical intervals.
ends = list(np.arange(end, end + 20, 1))
begins = [begin + n for n in range(20)]
mean_autocorr(
begins,
ends,
title=f"Mean of the Autocovariances of Highly Overlapping Intervals",
subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="3344343fgfbbnlo8haa"><a href="javascript:code_toggle('#3344343fgfbbnlo8h')">show codes in the notebook</a></p>
<p class="hidecode" id="3344343fgfbbnlo8h"><a href="javascript:code_toggle('#3344343fgfbbnlo8haa')">hide codes in the notebook</a></p>
# Mean Autocorrelations
# Stride-750 windows on series 10, then on series 9.
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mean_autocorr(
begins,
ends,
subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
nlags=nlags,
)
# Mean Autocorrelations
alpha = 0.05
nlags = 60
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mean_autocorr(
begins,
ends,
subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
nlags=nlags,
)
# mean_autocorr_diffs  (comment corrected: this cell defines mean_autocorr_diffs)
# Per-lag mean +/- std of the DIFFERENCE between |autocorrelations| of the
# standardized and the IHS-normalized version of series ts_idx.
# NOTE(review): uses the global `alpha` set by earlier cells.
def mean_autocorr_diffs(
begins, ends, ts_idx, title=None, subtitle=None, nlags=20, plot=True
):
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
ts_len = ends[0] - begins[0]
if title is None:
title = "Mean Autocovariance Diffs"
kwargs = dict(title=title, subtitle=subtitle, showgrid=True)
fig = None
# autocorrs[0] = standardized, autocorrs[1] = IHS; absolute values are
# taken before the lag-0 normalization.
autocorrs = np.zeros((2, len(begins), nlags + 1))
autocorrs_diffs = np.zeros((len(begins), nlags + 1))
for idx, ts in enumerate(
[
standard_trans_ts,
ihs_trans_ts,
]
):
for i, (begin, end) in enumerate(zip(begins, ends)):
autocorrs[idx, i, :] = np.abs(
acovf(ts[ts_idx][begin:end], fft=False)[: nlags + 1]
)
autocorrs[idx, i, :] /= autocorrs[idx, i, 0]
for i in range(len(begins)):
autocorrs_diffs[i, :] = autocorrs[0, i, :] - autocorrs[1, i, :]
mu = np.mean(autocorrs_diffs, 0)
std = np.std(autocorrs_diffs, 0)
if plot:
fig = plot_stats(
mu,
std=std,
xs=np.arange(0, nlags + 1),
# label="",
color="tab:blue",
**kwargs,
)
kwargs = dict(fig=fig)
conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
plot_ts(
pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
linestyle="--",
color="gray",
name=f"{100 * (1-alpha)}% confidence interval",
fig=fig,
)
plot_ts(
pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
linestyle="--",
color="gray",
fig=fig,
)
display(fig)
return mu, std
# Run the diff analysis for series 9 on stride-750 windows.
nlags = 20
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mean_autocorr_diffs(begins, ends, ts_idx, title=None, subtitle=None, nlags=nlags)
# mean_autocorr_diffs_averaged  (comment corrected: defines the averaged variant)
# Averages the mean_autocorr_diffs result over ALL series and plots the mean
# with the std of the per-series means.
# NOTE(review): uses the global `alpha` set by earlier cells; the incoming
# **kwargs dict is mutated in place.
def mean_autocorr_diffs_averaged(
begins, ends, title=None, subtitle=None, nlags=20, **kwargs
):
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
ts_len = ends[0] - begins[0]
if title is None:
title = "Mean Autocovariance Diffs - Averaged"
# kwargs = dict(title=title, subtitle=subtitle, showgrid=True)
index_values = np.arange(0, nlags + 1)
kwargs["title"] = title
kwargs["subtitle"] = subtitle
kwargs["showgrid"] = True
kwargs["calc_xticks"] = True
kwargs["minor_xticks"] = 1
kwargs["calc_grid"] = True
kwargs["major_xstep"] = 1
kwargs["index_values"] = index_values
fig = None
n = len(standard_trans_ts)
means = np.zeros((n, nlags + 1))
stds = np.zeros((n, nlags + 1))
for ts_idx in range(n):
mu, std = mean_autocorr_diffs(begins, ends, ts_idx, nlags=nlags, plot=False)
means[ts_idx, :] = mu
stds[ts_idx, :] = std
mu = np.mean(means, 0)
# std = np.mean(stds, 0)
# Spread of the per-series means, NOT the mean of the per-series stds.
std = np.std(means, 0)
fig = plot_stats(
mu,
std=std,
xs=index_values,
# label="",
color="darkblue",
conf_alpha=0.15,
**kwargs,
)
kwargs = dict(fig=fig)
conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
plot_ts(
pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
linestyle="--",
color="gray",
name=f"{100 * (1-alpha)}% confidence interval",
fig=fig,
)
plot_ts(
pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
linestyle="--",
color="gray",
fig=fig,
)
display(fig)
# Averaged autocorrelation-difference plot over all signals.
nlags = 20
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mean_autocorr_diffs_averaged(
begins, ends, title="", ax_height=300, subtitle=None, nlags=nlags
)
%%html
<!--hide_me_please-->
<p class="showcode" id="12345ygtfraa"><a href="javascript:code_toggle('#12345ygtfr')">show codes in the notebook</a></p>
<p class="hidecode" id="12345ygtfr"><a href="javascript:code_toggle('#12345ygtfraa')">hide codes in the notebook</a></p>
%%html
<!--hide_me_please-->
<p class="showcode" id="zxc12345ygtfraa"><a href="javascript:code_toggle('#zxc12345ygtfr')">show codes in the notebook</a></p>
<p class="hidecode" id="zxc12345ygtfr"><a href="javascript:code_toggle('#zxc12345ygtfraa')">hide codes in the notebook</a></p>
%%html
<!--hide_me_please-->
<p id="12345gfderthbvbv1234asdfewaa"></p>
<p class="hidecode" id="12345gfderthbvbv1234asdfew"><a href="javascript:code_toggle('#12345gfderthbvbv1234asdfewaa')">hide codes in the notebook</a></p>
# cov_matrix
# [γ(i−j)]_{i,j=1}^{n}
def cov_matrix(acovf):
    """Build the n x n symmetric Toeplitz matrix M[i, j] = acovf[|i - j|]."""
    lags = np.asarray(acovf, dtype=float)
    idx = np.arange(len(lags))
    # Fancy-indexing with the |i - j| lag table fills the whole matrix at once.
    return lags[np.abs(idx[:, None] - idx[None, :])]
# print("cov_matrix(np.array([1, 0.5, 0.7, 0.1]))")
# cov_matrix(np.array([1, 0.5, 0.7, 0.1]))
%%html
<!--hide_me_please-->
<p id="12345gfderthbvbv1234aa"></p>
<p class="hidecode" id="12345gfderthbvbv1234"><a href="javascript:code_toggle('#12345gfderthbvbv1234aa')">hide codes in the notebook</a></p>
# conditioning of the matrix computed using autocovariance with and without fft
# Compare condition numbers of the normalized autocovariance matrices built
# from the FFT-based and the direct autocovariance estimator.
ts_idx = 5
nlags = 60
for fft in [True, False]:
    standard_aconvf = acovf(standard_trans_ts[ts_idx], fft=fft)[:nlags]
    standard_aconvf /= standard_aconvf[0]
    ihs_aconvf = acovf(ihs_trans_ts[ts_idx], fft=fft)[:nlags]
    ihs_aconvf /= ihs_aconvf[0]
    standard_cov = cov_matrix(standard_aconvf)
    ihs_cov = cov_matrix(ihs_aconvf)
    print(f"fft={fft}")
    # Report both the 1-norm and 2-norm condition numbers.
    for q in [1, 2]:
        for m, name in [
            (standard_cov, "standard matrix:"),
            (ihs_cov, "ihs matrix: "),
        ]:
            print(f"{q}-norm of {name} {np.linalg.cond(m, q)}")
%%html
<!--hide_me_please-->
<p id="12345gfderthbvbvaa"></p>
<p class="hidecode" id="12345gfderthbvbv"><a href="javascript:code_toggle('#12345gfderthbvbvaa')">hide codes in the notebook</a></p>
# plot_mean_cond
def plot_mean_cond(begins, ends, title=None, subtitle=None, p=1, nlags=20):
    """Plot the mean (+/- std over intervals) condition number, in the
    ``p``-norm, of the normalized autocovariance matrix of size lag x lag,
    for lag = 1..nlags, comparing the standard and IHS transforms.

    NOTE(review): reads the notebook global ``ts_idx`` (it is NOT a
    parameter), plus ``standard_trans_ts``/``ihs_trans_ts`` — confirm they
    are set before calling.
    """
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    if title is None:
        title = f"Mean Conditioning {p}-norm"
    kwargs = dict(
        title=title,
        subtitle=subtitle,
        calc_xticks=True,
        yscale="log",
        xtitle="lag (matrix of size: lag x lag)",
    )
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standard", "tab:blue"),
        (ihs_trans_ts, "IHS", "tab:red"),
    ]:
        mean_conds = np.zeros(nlags)
        std_conds = np.zeros(nlags)
        for lags in np.arange(1, nlags + 1):
            conds = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[ts_idx][begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                conds.append(np.linalg.cond(mcov, p))
            mean_conds[lags - 1] = np.mean(conds)
            std_conds[lags - 1] = np.std(conds)
        fig = plot_stats(
            mean_conds,
            xs=np.arange(1, nlags + 1),
            std=std_conds[0:],
            std_xs=np.arange(1, nlags + 1),
            fill_along_axis=False,
            fill_only_positive=False,
            label=ts_name,
            color=color,
            **kwargs,
        )
        # Second series is drawn on the same figure: keep only fig in kwargs.
        kwargs = dict(fig=fig)
    ax_settings(fig=fig, showgrid=True)
    display(fig)
%%html
<!--hide_me_please-->
<p id="2343rfsfsfsfsaa"></p>
<p class="hidecode" id="2343rfsfsfsfs"><a href="javascript:code_toggle('#2343rfsfsfsfsaa')">hide codes in the notebook</a></p>
# Mean of the Matrices Conditioning for Highly Overlapping Intervals
nlags = 60
alpha = 0.05
ts_idx = 10
ts_len = 2000
begin = 4000
end = begin + ts_len
# 20 windows shifted by a single sample each -> almost identical intervals.
ends = list(np.arange(end, end + 20, 1))
begins = [begin + n for n in range(20)]
p = 2
plot_mean_cond(
begins,
ends,
title=f"Mean of the Matrices Conditioning for Highly Overlapping Intervals",
subtitle=f"(derrived from {p}-norm), {len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
p=p,
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="ghfdjks6758493aa"><a href="javascript:code_toggle('#ghfdjks6758493')">show codes in the notebook</a></p>
<p class="hidecode" id="ghfdjks6758493"><a href="javascript:code_toggle('#ghfdjks6758493aa')">hide codes in the notebook</a></p>
# Mean Conditioning
# Same plot for disjointly stepped windows — first for signal ts[10].
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2
plot_mean_cond(
begins,
ends,
subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
p=p,
nlags=nlags,
)
# Mean Conditioning
# ...then the same for signal ts[9].
alpha = 0.05
nlags = 60
ts_idx = 9
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2
plot_mean_cond(
begins,
ends,
subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
p=p,
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="ertyu654ahlehjfaaa"><a href="javascript:code_toggle('#ertyu654ahlehjfa')">show codes in the notebook</a></p>
<p class="hidecode" id="ertyu654ahlehjfa"><a href="javascript:code_toggle('#ertyu654ahlehjfaaa')">hide codes in the notebook</a></p>
# mean_of_the_means_of_conds
# mean_of_the_stds_of_means
# Parameters shared (as notebook globals) by mean_of_conds_means_and_stds below.
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
def mean_of_conds_means_and_stds(tss, p=1):
    """For each series in ``tss`` and each matrix size 1..nlags, compute the
    ``p``-norm condition number of the normalized autocovariance matrix on
    every interval; return the across-series means of the per-series means
    and stds, rounded to one decimal.

    NOTE(review): reads the notebook globals ``nlags``, ``begins``, ``ends``.

    Returns:
        (means, stds): two pd.Series indexed by matrix size 1..nlags.
    """
    mean_conds = np.zeros((len(tss), nlags + 1))
    std_conds = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            conds = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                conds.append(np.linalg.cond(mcov, p))
            # Slot 0 stays unused; sizes start at 1.
            mean_conds[i][lags] = np.mean(conds)
            std_conds[i][lags] = np.std(conds)
    return (
        pd.Series(np.around(np.mean(mean_conds, 0)[1:], 1), index=range(1, nlags + 1)),
        pd.Series(np.around(np.mean(std_conds, 0), 1)[1:], index=range(1, nlags + 1)),
    )
p = 2
kwargs = dict(
    title="Mean of the Means and Standard Deviations of Matrix Conditioning",
    # BUGFIX: the subtitle previously interpolated the stale loop variable
    # ``q`` (left over from an earlier cell); the norm used below is ``p``.
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of conditionings({p}-norm derrived) of {len(begins)} "
    f"covariance matrices of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    height=800,
    calc_xticks=True,
)
fig = None
for ts, ts_name, color in [
    (standard_trans_ts, "standard", "tab:blue"),
    (ihs_trans_ts, "IHS", "tab:red"),
]:
    mean_conds, std_conds = mean_of_conds_means_and_stds(ts, p=p)
    # Error bars: mean +/- std (lower bound rounded like the means).
    std_bar = np.transpose(
        np.array([np.around(mean_conds - std_conds, 1), mean_conds + std_conds])
    )
    fig = plot_stats(
        mean_conds.values,
        xs=mean_conds.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    # Second series is overlaid on the same figure.
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="alejajhagheiuhr982r3hfuwiaa"><a href="javascript:code_toggle('#alejajhagheiuhr982r3hfuwi')">show codes in the notebook</a></p>
<p class="hidecode" id="alejajhagheiuhr982r3hfuwi"><a href="javascript:code_toggle('#alejajhagheiuhr982r3hfuwiaa')">hide codes in the notebook</a></p>
# means_and_stds_of_1-norm_and_2-norm
# Parameters shared (as notebook globals) by mean_of_q_norm_means_and_stds below.
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    """For each series in ``tss`` and each matrix size 1..nlags, compute the
    ``p``-norm of the normalized autocovariance matrix (or of its inverse
    when ``inv`` is True) on every interval.

    NOTE(review): reads the notebook globals ``nlags``, ``begins``, ``ends``.

    Returns:
        (means, stds): pd.Series indexed 1..nlags — the across-series mean
        of the per-series means, and the across-series mean of the
        per-series stds.
    """
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            # Slot 0 stays unused; sizes start at 1.
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.mean(std_norms, 0)[1:], index=range(1, nlags + 1)),
    )
p = 2
kwargs = dict(
    title=f"Mean of the Means and Standard Deviations of Matrices' {p}-Norms",
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of {len(begins)} "
    f"covariance matrices' {p}-norms of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    height=800,
    calc_xticks=True,
)
fig = None
# Overlay ||A|| and ||A^-1|| for both transforms on one log-scale figure.
for ts, ts_name, color, inv in [
    (standard_trans_ts, "||A|| for standard", "tab:blue", False),
    (standard_trans_ts, "||A^-1|| for standard", "skyblue", True),
    (ihs_trans_ts, "||A|| for IHS", "tab:red", False),
    (ihs_trans_ts, "||A^-1|| for IHS", "tab:orange", True),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    # Error bars: mean +/- std.
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    # Subsequent series are overlaid on the same figure.
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
# means_and_stds_of_1-norm_and_2-norm
# Second run with the same interval layout (globals for the redefined helper).
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    """Redefinition: same per-interval ``p``-norm computation as above, but
    the second returned Series is now the std ACROSS series of the
    per-series means (the first version returned the mean of the
    per-series stds).

    NOTE(review): reads the notebook globals ``nlags``, ``begins``, ``ends``.

    Returns:
        (means, stds): pd.Series indexed 1..nlags.
    """
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            # Slot 0 stays unused; sizes start at 1.
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
    )
p = 2
kwargs = dict(
    title=f"Mean of the Means and Standard Deviations of Matrices' {p}-Norms",
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of {len(begins)} "
    f"covariance matrices' {p}-norms of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    height=800,
    calc_xticks=True,
)
fig = None
# Overlay ||A|| and ||A^-1|| for both transforms on one log-scale figure.
for ts, ts_name, color, inv in [
    (standard_trans_ts, "||A|| for standard", "tab:blue", False),
    (standard_trans_ts, "||A^-1|| for standard", "skyblue", True),
    (ihs_trans_ts, "||A|| for IHS", "tab:red", False),
    (ihs_trans_ts, "||A^-1|| for IHS", "tab:orange", True),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    # Error bars: mean +/- std.
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    # Subsequent series are overlaid on the same figure.
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
# means_and_stds_of_1-norm_and_2-norm
# Third run; note the printout shows inclusive interval ends (end-1).
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    """Redefinition (identical to the previous version): per-interval
    ``p``-norms of the normalized autocovariance matrix (or its inverse
    when ``inv``); returns the across-series mean of the per-series means
    and the across-series std of the per-series means.

    NOTE(review): reads the notebook globals ``nlags``, ``begins``, ``ends``.
    """
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            # Slot 0 stays unused; sizes start at 1.
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
    )
p = 2
kwargs = dict(
    title=f"Mean of the Means and Standard Deviations of Matrices' {p}-Norms",
    subtitle=f"Mean of the {len(eeg_ts)} Means and STDs of groups of {len(begins)} "
    f"covariance matrices' {p}-norms of size NxN.\nAll {len(begins)} matrices from each of {len(eeg_ts)} "
    f"groups are computed for samples (of length {ts_len}) "
    f"from the same EEG signal relating to the group.",
    xtitle="N (matrix size)",
    ax_height=400,
    calc_xticks=True,
)
fig = None
# Overlay ||R_n|| and ||R_n^-1|| for both transforms on one log-scale figure.
for ts, ts_name, color, inv in [
    (standard_trans_ts, "$||R_n||$ for standard", "tab:blue", False),
    (standard_trans_ts, "$||R_n^{-1}||$ for standard", "skyblue", True),
    (ihs_trans_ts, "$||R_n||_2$ for IHS", "tab:red", False),
    # BUGFIX: the label was "$||R_n^-1||_2$" — malformed LaTeX (superscript
    # of more than one character needs braces).
    (ihs_trans_ts, "$||R_n^{-1}||_2$ for IHS", "tab:orange", True),
]:
    mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
    # Error bars: mean +/- std.
    std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
    fig = plot_stats(
        mean_norms.values,
        xs=mean_norms.index.values,
        conf_intvs=std_bar,
        fill_along_axis=False,
        fill_only_positive=False,
        label=ts_name,
        color=color,
        yscale="log",
        **kwargs,
    )
    # Subsequent series are overlaid on the same figure.
    kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="1alejajhagheiuhr982r3hfuwiaa"><a href="javascript:code_toggle('#1alejajhagheiuhr982r3hfuwi')">show codes in the notebook</a></p>
<p class="hidecode" id="1alejajhagheiuhr982r3hfuwi"><a href="javascript:code_toggle('#1alejajhagheiuhr982r3hfuwiaa')">hide codes in the notebook</a></p>
# Scratch check: fit alpha by OLS on the row sums of two matrices, and look
# at the smallest eigenvalue of m (prototype for mean_of_alphas_and_deltas).
m = np.array([[1, 2, 3], [4, 5, 6], [100, 1, 2]])
m
x = np.sum(m, axis=1)
x
m1 = np.array([[4, 3, 5], [6, 8, 10], [120, 5, 3]])
y = np.sum(m1, axis=1)
y
OLS(y, x).fit().params
np.min(np.linalg.eigvals(m))
# mean_of_alphas_and_deltas
# Decompose the IHS autocovariance matrix as I + alpha*(R_stn - I) + delta
# (alpha fitted by OLS on row sums) and track, over all intervals and for
# matrix sizes 2..nlags, the means/stds of alpha, ||delta||_2, several
# derived eigenvalue bounds, and the true minimal eigenvalues.
# NOTE(review): reads the notebook globals begins, ends, standard_trans_ts,
# ihs_trans_ts; returns 14 (mean, std) pairs of pd.Series indexed 2..nlags.
def mean_of_alphas_and_deltas(nlags, ts_idx):
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
stn_tss = [standard_trans_ts[ts_idx]]
ihs_tss = [ihs_trans_ts[ts_idx]]
# Accumulators: one row per series (here a single series), one column per size.
mean_alphas = np.zeros((len(stn_tss), nlags + 1))
mean_deltas = np.zeros((len(stn_tss), nlags + 1))
mean_delta_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
mean_one_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
mean_min_eig_eye_pl_deltas = np.zeros((len(stn_tss), nlags + 1))
mean_min_lmb_pl_1mlmb_alphas = np.zeros((len(stn_tss), nlags + 1))
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
(len(stn_tss), nlags + 1)
)
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
(len(stn_tss), nlags + 1)
)
mean_stn_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
mean_bound_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
mean_bound_min_lambdas_without_delta_ineq = np.zeros((len(stn_tss), nlags + 1))
mean_ihs_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
mean_ihs_min_lambdas_without_delta = np.zeros((len(stn_tss), nlags + 1))
mean_increases = np.zeros((len(stn_tss), nlags + 1))
std_alphas = np.zeros((len(stn_tss), nlags + 1))
std_deltas = np.zeros((len(stn_tss), nlags + 1))
std_delta_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
std_one_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
std_min_eig_eye_pl_deltas = np.zeros((len(stn_tss), nlags + 1))
std_min_lmb_pl_1mlmb_alphas = np.zeros((len(stn_tss), nlags + 1))
std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
(len(stn_tss), nlags + 1)
)
std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
(len(stn_tss), nlags + 1)
)
std_stn_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
std_bound_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
std_bound_min_lambdas_without_delta_ineq = np.zeros((len(stn_tss), nlags + 1))
std_ihs_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
std_ihs_min_lambdas_without_delta = np.zeros((len(stn_tss), nlags + 1))
std_increases = np.zeros((len(stn_tss), nlags + 1))
for idx, (standard_ts, ihs_ts) in enumerate(zip(stn_tss, ihs_tss)):
# Precompute the full nlags x nlags normalized autocovariance matrix for
# every interval; smaller sizes are taken as top-left submatrices below.
standard_mcovs = [None] * len(begins)
ihs_mcovs = [None] * len(begins)
for ts, mcovs in zip([standard_ts, ihs_ts], [standard_mcovs, ihs_mcovs]):
for i, (begin, end) in enumerate(zip(begins, ends)):
acov = acovf(ts[begin:end], fft=False)[:nlags]
acov /= acov[0]
mcovs[i] = cov_matrix(acov)
for lags in range(2, nlags + 1):
alphas = []
deltas = []
delta_over_one_pl_delta = []
one_over_one_pl_delta = []
min_eig_eye_pl_deltas = []
min_lmb_pl_1mlmb_alphas = []
one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = []
min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = []
stn_min_lambdas = []
bound_min_lambdas = []
bound_min_lambdas_without_delta_ineq = []
ihs_min_lambdas = []
ihs_min_lambdas_without_delta = []
increases = []
for i in range(len(begins)):
eye = np.eye(lags)
stn_mcov = standard_mcovs[i][:lags, :lags]
ihs_mcov = ihs_mcovs[i][:lags, :lags]
# alpha: OLS slope between the row sums of (R - I) of both matrices.
x = np.sum(stn_mcov - eye, axis=1)
y = np.sum(ihs_mcov - eye, axis=1)
alpha = OLS(y, x).fit().params[0]
# delta: residual of the affine model ihs = I + alpha*(stn - I).
delta = ihs_mcov - eye - alpha * (stn_mcov - eye)
delta_norm = np.linalg.norm(delta, ord=2)
min_lambda = np.min(np.linalg.eigvals(stn_mcov))
delta_over_1_pl_delta = delta_norm / (1 + delta_norm)
one_over_1_pl_delta = 1 / (1 + delta_norm)
increase = (1 - alpha) * (1 - min_lambda)
min_eig_eye_pl_delta = np.min(np.linalg.eigvals(eye + delta))
min_lmb_pl_1mlmb_alpha = min_lambda + (1 - min_lambda) * alpha
alphas.append(alpha)
deltas.append(delta_norm)
delta_over_one_pl_delta.append(delta_over_1_pl_delta)
one_over_one_pl_delta.append(one_over_1_pl_delta)
min_eig_eye_pl_deltas.append(min_eig_eye_pl_delta)
min_lmb_pl_1mlmb_alphas.append(min_lmb_pl_1mlmb_alpha)
one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.append(
one_over_1_pl_delta - min_lmb_pl_1mlmb_alpha
)
min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.append(
# np.linalg.norm(np.linalg.inv(eye + delta), ord=2)
min_eig_eye_pl_delta
- min_lmb_pl_1mlmb_alpha
)
stn_min_lambdas.append(min_lambda)
bound_min_lambdas.append(min_lambda + increase - delta_over_1_pl_delta)
bound_min_lambdas_without_delta_ineq.append(
alpha * min_lambda - alpha + min_eig_eye_pl_delta
)
ihs_min_lambdas.append(np.min(np.linalg.eigvals(ihs_mcov)))
ihs_min_lambdas_without_delta.append(
np.min(np.linalg.eigvals(eye + alpha * (stn_mcov - eye)))
)
increases.append(increase)
# Aggregate the per-interval lists into the (series, size) accumulators.
mean_alphas[idx][lags] = np.mean(alphas)
mean_deltas[idx][lags] = np.mean(deltas)
mean_delta_over_one_pl_delta[idx][lags] = np.mean(delta_over_one_pl_delta)
mean_one_over_one_pl_delta[idx][lags] = np.mean(one_over_one_pl_delta)
mean_min_eig_eye_pl_deltas[idx][lags] = np.mean(min_eig_eye_pl_deltas)
mean_min_lmb_pl_1mlmb_alphas[idx][lags] = np.mean(min_lmb_pl_1mlmb_alphas)
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas[idx][
lags
] = np.mean(one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas)
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas[idx][
lags
] = np.mean(min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas)
mean_stn_min_lambdas[idx][lags] = np.mean(stn_min_lambdas)
mean_bound_min_lambdas[idx][lags] = np.mean(bound_min_lambdas)
mean_bound_min_lambdas_without_delta_ineq[idx][lags] = np.mean(
bound_min_lambdas_without_delta_ineq
)
mean_ihs_min_lambdas[idx][lags] = np.mean(ihs_min_lambdas)
mean_ihs_min_lambdas_without_delta[idx][lags] = np.mean(
ihs_min_lambdas_without_delta
)
mean_increases[idx][lags] = np.mean(increases)
std_alphas[idx][lags] = np.std(alphas)
std_deltas[idx][lags] = np.std(deltas)
std_delta_over_one_pl_delta[idx][lags] = np.std(delta_over_one_pl_delta)
std_one_over_one_pl_delta[idx][lags] = np.std(one_over_one_pl_delta)
std_min_eig_eye_pl_deltas[idx][lags] = np.std(min_eig_eye_pl_deltas)
std_min_lmb_pl_1mlmb_alphas[idx][lags] = np.std(min_lmb_pl_1mlmb_alphas)
std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas[idx][lags] = np.std(
one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
)
std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas[idx][lags] = np.std(
min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
)
std_stn_min_lambdas[idx][lags] = np.std(stn_min_lambdas)
std_bound_min_lambdas[idx][lags] = np.std(bound_min_lambdas)
std_bound_min_lambdas_without_delta_ineq[idx][lags] = np.std(
bound_min_lambdas_without_delta_ineq
)
std_ihs_min_lambdas[idx][lags] = np.std(ihs_min_lambdas)
std_ihs_min_lambdas_without_delta[idx][lags] = np.std(
ihs_min_lambdas_without_delta
)
std_increases[idx][lags] = np.std(increases)
# Each pair below: (mean over series, mean of stds over series), sizes 2..nlags.
return (
(
pd.Series(np.mean(mean_alphas, 0)[2:], index=range(2, nlags + 1)),
pd.Series(np.mean(std_alphas, 0)[2:], index=range(2, nlags + 1)),
),
(
pd.Series(np.mean(mean_deltas, 0)[2:], index=range(2, nlags + 1)),
pd.Series(np.mean(std_deltas, 0)[2:], index=range(2, nlags + 1)),
),
(
pd.Series(
np.mean(mean_delta_over_one_pl_delta, 0)[2:],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_delta_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
),
),
(
pd.Series(
np.mean(mean_one_over_one_pl_delta, 0)[2:],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_one_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
),
),
(
pd.Series(
np.mean(mean_min_eig_eye_pl_deltas, 0)[2:],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_min_eig_eye_pl_deltas, 0)[2:], index=range(2, nlags + 1)
),
),
(
pd.Series(
np.mean(mean_min_lmb_pl_1mlmb_alphas, 0)[2:],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_min_lmb_pl_1mlmb_alphas, 0)[2:], index=range(2, nlags + 1)
),
),
(
pd.Series(
np.mean(mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[
2:
],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
index=range(2, nlags + 1),
),
),
(
pd.Series(
np.mean(mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[
2:
],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
index=range(2, nlags + 1),
),
),
(
pd.Series(np.mean(mean_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
pd.Series(np.mean(std_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
),
(
pd.Series(
np.mean(mean_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)
),
pd.Series(np.mean(std_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
),
(
pd.Series(
np.mean(mean_bound_min_lambdas_without_delta_ineq, 0)[2:],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_bound_min_lambdas_without_delta_ineq, 0)[2:],
index=range(2, nlags + 1),
),
),
(
pd.Series(np.mean(mean_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
pd.Series(np.mean(std_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
),
(
pd.Series(
np.mean(mean_ihs_min_lambdas_without_delta, 0)[2:],
index=range(2, nlags + 1),
),
pd.Series(
np.mean(std_ihs_min_lambdas_without_delta, 0)[2:],
index=range(2, nlags + 1),
),
),
(
pd.Series(np.mean(mean_increases, 0)[2:], index=range(2, nlags + 1)),
pd.Series(np.mean(std_increases, 0)[2:], index=range(2, nlags + 1)),
),
)
# Run the alpha/delta decomposition for sizes up to 40 on signal ts[5] and
# unpack every (mean, std) pair.
nlags = 40
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
(
(mean_alphas, std_alphas),
(mean_deltas, std_deltas),
(mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
(mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
(mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
(mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
(
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
),
(
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
),
(mean_stn_min_lambdas, std_stn_min_lambdas),
(mean_bound_min_lambdas, std_bound_min_lambdas),
(
mean_bound_min_lambdas_without_delta_ineq,
std_bound_min_lambdas_without_delta_ineq,
),
(mean_ihs_min_lambdas, std_ihs_min_lambdas),
(mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
(mean_increases, std_increases),
) = mean_of_alphas_and_deltas(nlags, ts_idx=5)
# Error-bar arrays (mean - std, mean + std) for every plotted quantity.
std_bar_alphas = np.transpose(
np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
np.array(
[
mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
]
)
)
std_bar_one_over_one_pl_delta = np.transpose(
np.array(
[
mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
]
)
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
]
)
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
- std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
+ std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
- std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
+ std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_stn_min_lambdas = np.transpose(
np.array(
[
mean_stn_min_lambdas - std_stn_min_lambdas,
mean_stn_min_lambdas + std_stn_min_lambdas,
]
)
)
std_bar_bound_min_lambdas = np.transpose(
np.array(
[
mean_bound_min_lambdas - std_bound_min_lambdas,
mean_bound_min_lambdas + std_bound_min_lambdas,
]
)
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
np.array(
[
mean_bound_min_lambdas_without_delta_ineq
- std_bound_min_lambdas_without_delta_ineq,
mean_bound_min_lambdas_without_delta_ineq
+ std_bound_min_lambdas_without_delta_ineq,
]
)
)
std_bar_ihs_min_lambdas = np.transpose(
np.array(
[
mean_ihs_min_lambdas - std_ihs_min_lambdas,
mean_ihs_min_lambdas + std_ihs_min_lambdas,
]
)
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
np.array(
[
mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
]
)
)
std_bar_increases = np.transpose(
np.array([mean_increases - std_increases, mean_increases + std_increases])
)
# Shared plot options for the alpha/delta figures.
kwargs = dict(
xtitle="N (matrix size)",
calc_xticks=True,
fill_along_axis=False,
fill_only_positive=False,
)
# Figure 1: alpha, ||delta|| and the delta-derived quantities (linear scale).
fig = plot_stats(
mean_alphas.values,
xs=mean_alphas.index.values,
conf_intvs=std_bar_alphas,
name="Average alpha",
color="darkblue",
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
plot_stats(
mean_deltas.values,
xs=mean_deltas.index.values,
conf_intvs=std_bar_deltas,
name="Average 2-norm of delta",
color="tab:red",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_delta_over_one_plus_delta.values,
xs=mean_delta_over_one_plus_delta.index.values,
conf_intvs=std_bar_delta_over_one_plus_delta,
name="delta / (1 + delta)",
color="tab:green",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_one_over_one_pl_delta.values,
xs=mean_one_over_one_pl_delta.index.values,
conf_intvs=std_bar_one_over_one_pl_delta,
name="1 / (1 + delta)",
color="pink",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_min_eig_eye_pl_deltas.values,
xs=mean_min_eig_eye_pl_deltas.index.values,
conf_intvs=std_bar_min_eig_eye_pl_deltas,
name="min_eig_(eye_pl_delta)",
color="tab:cyan",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_min_lmb_pl_1mlmb_alphas.values,
xs=mean_min_lmb_pl_1mlmb_alphas.index.values,
conf_intvs=std_bar_min_lmb_pl_1mlmb_alphas,
name="min_lmb + (1 - min_lmb) * alphas",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
# Figure 2: minimal eigenvalues and their bounds (log scale).
fig = plot_stats(
mean_stn_min_lambdas.values,
xs=mean_stn_min_lambdas.index.values,
conf_intvs=std_bar_stn_min_lambdas,
name="stn_min_lambdas",
color="darkblue",
yscale="log",
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_ihs_min_lambdas_without_delta.values,
xs=mean_ihs_min_lambdas_without_delta.index.values,
conf_intvs=std_bar_ihs_min_lambdas_without_delta,
name="ihs_min_lambdas_without_delta",
color="tab:red",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_increases.values,
xs=mean_increases.index.values,
conf_intvs=std_bar_increases,
name="increases",
color="gray",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_bound_min_lambdas.values,
xs=mean_bound_min_lambdas.index.values,
conf_intvs=std_bar_bound_min_lambdas,
name="bound_min_lambdas",
color="tab:green",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_bound_min_lambdas_without_delta_ineq.values,
xs=mean_bound_min_lambdas_without_delta_ineq.index.values,
conf_intvs=std_bar_bound_min_lambdas_without_delta_ineq,
name="bound_min_lambdas_without_delta_ineq",
color="tab:cyan",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_ihs_min_lambdas.values,
xs=mean_ihs_min_lambdas.index.values,
conf_intvs=std_bar_ihs_min_lambdas,
name="ihs_min_lambdas",
color="tab:orange",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
display(fig)
nlags = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
(
(mean_alphas, std_alphas),
(mean_deltas, std_deltas),
(mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
(mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
(mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
(mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
(
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
),
(
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
),
(mean_stn_min_lambdas, std_stn_min_lambdas),
(mean_bound_min_lambdas, std_bound_min_lambdas),
(
mean_bound_min_lambdas_without_delta_ineq,
std_bound_min_lambdas_without_delta_ineq,
),
(mean_ihs_min_lambdas, std_ihs_min_lambdas),
(mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
(mean_increases, std_increases),
) = mean_of_alphas_and_deltas(nlags, ts_idx=5)
# --- Notebook cell: turn each (mean, std) pair into a (len, 2) array of
# [mean - std, mean + std] confidence-band endpoints for plot_stats' conf_intvs.
std_bar_alphas = np.transpose(
np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
np.array(
[
mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
]
)
)
std_bar_one_over_one_pl_delta = np.transpose(
np.array(
[
mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
]
)
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
]
)
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
- std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
+ std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
- std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
+ std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_stn_min_lambdas = np.transpose(
np.array(
[
mean_stn_min_lambdas - std_stn_min_lambdas,
mean_stn_min_lambdas + std_stn_min_lambdas,
]
)
)
std_bar_bound_min_lambdas = np.transpose(
np.array(
[
mean_bound_min_lambdas - std_bound_min_lambdas,
mean_bound_min_lambdas + std_bound_min_lambdas,
]
)
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
np.array(
[
mean_bound_min_lambdas_without_delta_ineq
- std_bound_min_lambdas_without_delta_ineq,
mean_bound_min_lambdas_without_delta_ineq
+ std_bound_min_lambdas_without_delta_ineq,
]
)
)
std_bar_ihs_min_lambdas = np.transpose(
np.array(
[
mean_ihs_min_lambdas - std_ihs_min_lambdas,
mean_ihs_min_lambdas + std_ihs_min_lambdas,
]
)
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
np.array(
[
mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
]
)
)
std_bar_increases = np.transpose(
np.array([mean_increases - std_increases, mean_increases + std_increases])
)
# --- Notebook cell: alpha_h and ||Delta_h||_2 with std bands (LaTeX labels).
fig = plot_stats(
mean_alphas.values,
xs=mean_alphas.index.values,
conf_intvs=std_bar_alphas,
# fill_along_axis=False,
name="$\\alpha_h$",
color="darkblue",
height=300,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_deltas.values,
xs=mean_deltas.index.values,
conf_intvs=std_bar_deltas,
# fill_along_axis=False,
name="$||\Delta_h||_2$",
color="tab:orange",
fig=fig,
# height=300,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
# --- Notebook cell: mean curves only (no bands) via plot_ts, y-axis clipped below at 0.8.
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)
fig = plot_ts(
mean_min_lmb_pl_1mlmb_alphas,
# std=std_bar_min_lmb_pl_1mlmb_alphas,
name="$\lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
color="darkblue",
height=600,
# fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_min_eig_eye_pl_deltas,
# conf_intvs=std_bar_min_eig_eye_pl_deltas,
name="$\lambda_{1}(I_h + \Delta_h)$",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_one_over_one_pl_delta,
# conf_intvs=std_bar_one_over_one_pl_delta,
name="$\dfrac{1}{1 + ||\Delta_h||}$",
color="tab:green",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.8)
display(fig)
# Bare expression — in the notebook this cell just echoes the array as output.
std_bar_min_lmb_pl_1mlmb_alphas
# --- Notebook cell: same three curves but with translucent std bands (conf_alpha=0.1).
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)
fig = plot_stats(
mean_min_lmb_pl_1mlmb_alphas.values,
xs=mean_min_lmb_pl_1mlmb_alphas.index.values,
conf_intvs=std_bar_min_lmb_pl_1mlmb_alphas,
fill_along_axis=False,
name="$\lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
color="darkblue",
conf_alpha=0.1,
height=600,
# fig=fig,
**kwargs
)
fig = plot_stats(
mean_min_eig_eye_pl_deltas.values,
xs=mean_min_eig_eye_pl_deltas.index.values,
conf_intvs=std_bar_min_eig_eye_pl_deltas,
fill_along_axis=False,
name="$\lambda_{1}(I_h + \Delta_h)$",
color="tab:orange",
conf_alpha=0.1,
fig=fig,
**kwargs
)
fig = plot_stats(
mean_one_over_one_pl_delta.values,
xs=mean_one_over_one_pl_delta.index.values,
conf_intvs=std_bar_one_over_one_pl_delta,
fill_along_axis=False,
name="$\dfrac{1}{1 + ||\Delta_h||}$",
color="tab:green",
conf_alpha=0.1,
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.8)
display(fig)
# --- Notebook cell: the two difference statistics (bound minus lower estimate).
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)
fig = plot_stats(
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.values,
xs=mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.index.values,
conf_intvs=std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
# fill_along_axis=False,
name="$\lambda_{1}(I_h + \Delta_h) - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
color="darkblue",
# conf_alpha=0.1,
height=600,
# fig=fig,
**kwargs
)
fig = plot_stats(
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.values,
xs=mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.index.values,
conf_intvs=std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
# fill_along_axis=False,
name="$\dfrac{1}{1 + ||\Delta_h||} - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
color="tab:orange",
# conf_alpha=0.1,
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
# ax.set_ylim(bottom=0.8)
display(fig)
# --- Notebook cell: differences against a zero baseline, tight y-limits [-0.05, 0.05].
kwargs = dict(
xtitle="h (matrix size)",
calc_xticks=True,
major_xstep=1,
height=500,
)
# Flat zero series drawn first as a visual reference line.
fig = plot_ts(
pd.Series(data=0, index=mean_min_lmb_pl_1mlmb_alphas.index),
# std=std_bar_min_lmb_pl_1mlmb_alphas,
name="",
color="darkblue",
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_min_eig_eye_pl_deltas - mean_min_lmb_pl_1mlmb_alphas,
# conf_intvs=std_bar_min_eig_eye_pl_deltas,
name="$\lambda_{1}(I + \Delta) - \lambda_{1}(R) + \\alpha(1 - \lambda_1(R))$",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_one_over_one_pl_delta - mean_min_lmb_pl_1mlmb_alphas,
# conf_intvs=std_bar_one_over_one_pl_delta,
name="$\dfrac{1}{1 + ||\Delta||} - \lambda_{1}(R) + \\alpha(1 - \lambda_1(R))$",
color="tab:green",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
ax.set_ylim(bottom=-0.05, top=0.05)
display(fig)
# averaged_mean_of_alphas_and_deltas
def averaged_mean_of_alphas_and_deltas(nlags):
    """Average alpha/delta eigenvalue statistics over all series pairs.

    For every pair (standard_ts, ihs_ts) taken from the notebook globals
    standard_trans_ts / ihs_trans_ts, and for every sliding window
    [begins[i], ends[i]) (also notebook globals), the normalized
    autocovariance matrix is computed; then for each matrix size
    ``lags`` in 2..nlags an OLS coefficient ``alpha``, a residual matrix
    ``delta`` and several derived eigenvalue bounds are collected and
    averaged over the windows.

    Returns a 14-tuple of ``(mean, std)`` pairs of ``pd.Series`` indexed by
    matrix size 2..nlags, where mean/std are taken across the series
    (axis 0), in this order: alphas, deltas, delta/(1+delta), 1/(1+delta),
    min_eig(I+delta), min_lmb+(1-min_lmb)*alpha, the two *_minus_*
    differences, stn_min_lambdas, bound_min_lambdas,
    bound_min_lambdas_without_delta_ineq, ihs_min_lambdas,
    ihs_min_lambdas_without_delta, increases.

    NOTE(review): acovf, cov_matrix and OLS come from earlier cells —
    presumably statsmodels' acovf/OLS and a project Toeplitz builder;
    confirm before reuse outside the notebook.
    """
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    stn_tss = standard_trans_ts
    ihs_tss = ihs_trans_ts
    # One row per series, one column per matrix size; columns 0..1 are never
    # written (the lags loop starts at 2) and stay zero.
    mean_alphas = np.zeros((len(stn_tss), nlags + 1))
    mean_deltas = np.zeros((len(stn_tss), nlags + 1))
    mean_delta_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_one_over_one_pl_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_min_eig_eye_pl_deltas = np.zeros((len(stn_tss), nlags + 1))
    mean_min_lmb_pl_1mlmb_alphas = np.zeros((len(stn_tss), nlags + 1))
    mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.zeros(
        (len(stn_tss), nlags + 1)
    )
    mean_stn_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_bound_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_bound_min_lambdas_without_delta_ineq = np.zeros((len(stn_tss), nlags + 1))
    mean_ihs_min_lambdas = np.zeros((len(stn_tss), nlags + 1))
    mean_ihs_min_lambdas_without_delta = np.zeros((len(stn_tss), nlags + 1))
    mean_increases = np.zeros((len(stn_tss), nlags + 1))
    for idx, (standard_ts, ihs_ts) in enumerate(zip(stn_tss, ihs_tss)):
        standard_mcovs = [None] * len(begins)
        ihs_mcovs = [None] * len(begins)
        # Per-window autocovariance matrices, normalized so lag 0 equals 1.
        for ts, mcovs in zip([standard_ts, ihs_ts], [standard_mcovs, ihs_mcovs]):
            for i, (begin, end) in enumerate(zip(begins, ends)):
                acov = acovf(ts[begin:end], fft=False)[:nlags]
                acov /= acov[0]
                mcovs[i] = cov_matrix(acov)
        for lags in range(2, nlags + 1):
            alphas = []
            deltas = []
            delta_over_one_pl_delta = []
            one_over_one_pl_delta = []
            min_eig_eye_pl_deltas = []
            min_lmb_pl_1mlmb_alphas = []
            one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = []
            min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = []
            stn_min_lambdas = []
            bound_min_lambdas = []
            bound_min_lambdas_without_delta_ineq = []
            ihs_min_lambdas = []
            ihs_min_lambdas_without_delta = []
            increases = []
            for i in range(len(begins)):
                eye = np.eye(lags)
                # Leading lags x lags principal submatrices of the window matrices.
                stn_mcov = standard_mcovs[i][:lags, :lags]
                ihs_mcov = ihs_mcovs[i][:lags, :lags]
                # alpha: no-intercept OLS fit of the IHS row sums (minus identity)
                # on the standard row sums (minus identity).
                x = np.sum(stn_mcov - eye, axis=1)
                y = np.sum(ihs_mcov - eye, axis=1)
                alpha = OLS(y, x).fit().params[0]
                # delta: residual of the linear model ihs ~ I + alpha * (stn - I).
                delta = ihs_mcov - eye - alpha * (stn_mcov - eye)
                delta_norm = np.linalg.norm(delta, ord=2)
                min_lambda = np.min(np.abs(np.linalg.eigvals(stn_mcov)))
                delta_over_1_pl_delta = delta_norm / (1 + delta_norm)
                one_over_1_pl_delta = 1 / (1 + delta_norm)
                increase = (1 - alpha) * (1 - min_lambda)
                min_eig_eye_pl_delta = np.min(np.abs(np.linalg.eigvals(eye + delta)))
                min_lmb_pl_1mlmb_alpha = min_lambda + (1 - min_lambda) * alpha
                alphas.append(alpha)
                deltas.append(delta_norm)
                delta_over_one_pl_delta.append(delta_over_1_pl_delta)
                one_over_one_pl_delta.append(one_over_1_pl_delta)
                min_eig_eye_pl_deltas.append(min_eig_eye_pl_delta)
                min_lmb_pl_1mlmb_alphas.append(min_lmb_pl_1mlmb_alpha)
                one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.append(
                    one_over_1_pl_delta - min_lmb_pl_1mlmb_alpha
                )
                min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.append(
                    min_eig_eye_pl_delta - min_lmb_pl_1mlmb_alpha
                )
                stn_min_lambdas.append(min_lambda)
                bound_min_lambdas.append(min_lambda + increase - delta_over_1_pl_delta)
                bound_min_lambdas_without_delta_ineq.append(
                    alpha * min_lambda - alpha + min_eig_eye_pl_delta
                )
                ihs_min_lambdas.append(np.min(np.linalg.eigvals(ihs_mcov)))
                ihs_min_lambdas_without_delta.append(
                    np.min(np.linalg.eigvals(eye + alpha * (stn_mcov - eye)))
                )
                increases.append(increase)
            # Average each per-window list into the (series, lags) cell.
            mean_alphas[idx][lags] = np.mean(alphas)
            mean_deltas[idx][lags] = np.mean(deltas)
            mean_delta_over_one_pl_delta[idx][lags] = np.mean(delta_over_one_pl_delta)
            mean_one_over_one_pl_delta[idx][lags] = np.mean(one_over_one_pl_delta)
            mean_min_eig_eye_pl_deltas[idx][lags] = np.mean(min_eig_eye_pl_deltas)
            mean_min_lmb_pl_1mlmb_alphas[idx][lags] = np.mean(min_lmb_pl_1mlmb_alphas)
            mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas[idx][
                lags
            ] = np.mean(one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas)
            mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas[idx][
                lags
            ] = np.mean(min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas)
            mean_stn_min_lambdas[idx][lags] = np.mean(stn_min_lambdas)
            mean_bound_min_lambdas[idx][lags] = np.mean(bound_min_lambdas)
            mean_bound_min_lambdas_without_delta_ineq[idx][lags] = np.mean(
                bound_min_lambdas_without_delta_ineq
            )
            mean_ihs_min_lambdas[idx][lags] = np.mean(ihs_min_lambdas)
            mean_ihs_min_lambdas_without_delta[idx][lags] = np.mean(
                ihs_min_lambdas_without_delta
            )
            mean_increases[idx][lags] = np.mean(increases)
    # Collapse the series axis: mean/std over axis 0, dropping the unused
    # columns 0..1, and wrap each in a pd.Series indexed by matrix size.
    return (
        (
            pd.Series(np.mean(mean_alphas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_alphas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(np.mean(mean_deltas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_deltas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_delta_over_one_pl_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_delta_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_one_over_one_pl_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_one_over_one_pl_delta, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_eig_eye_pl_deltas, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_min_eig_eye_pl_deltas, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_min_lmb_pl_1mlmb_alphas, 0)[2:], index=range(2, nlags + 1)
            ),
        ),
        (
            pd.Series(
                np.mean(mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[
                    2:
                ],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(
                np.mean(mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[
                    2:
                ],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_stn_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)
            ),
            pd.Series(np.std(mean_bound_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_bound_min_lambdas_without_delta_ineq, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_bound_min_lambdas_without_delta_ineq, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_ihs_min_lambdas, 0)[2:], index=range(2, nlags + 1)),
        ),
        (
            pd.Series(
                np.mean(mean_ihs_min_lambdas_without_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
            pd.Series(
                np.std(mean_ihs_min_lambdas_without_delta, 0)[2:],
                index=range(2, nlags + 1),
            ),
        ),
        (
            pd.Series(np.mean(mean_increases, 0)[2:], index=range(2, nlags + 1)),
            pd.Series(np.std(mean_increases, 0)[2:], index=range(2, nlags + 1)),
        ),
    )
# --- Notebook cell: same pipeline as above but averaged over ALL series
# (averaged_mean_of_alphas_and_deltas), with nlags = 40.
nlags = 40
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
(
(mean_alphas, std_alphas),
(mean_deltas, std_deltas),
(mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
(mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
(mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
(mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
(
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
),
(
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
),
(mean_stn_min_lambdas, std_stn_min_lambdas),
(mean_bound_min_lambdas, std_bound_min_lambdas),
(
mean_bound_min_lambdas_without_delta_ineq,
std_bound_min_lambdas_without_delta_ineq,
),
(mean_ihs_min_lambdas, std_ihs_min_lambdas),
(mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
(mean_increases, std_increases),
) = averaged_mean_of_alphas_and_deltas(nlags)
# --- Notebook cell: rebuild the [mean - std, mean + std] band endpoints.
std_bar_alphas = np.transpose(
np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
np.array(
[
mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
]
)
)
std_bar_one_over_one_pl_delta = np.transpose(
np.array(
[
mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
]
)
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
]
)
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
- std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
+ std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
- std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
+ std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_stn_min_lambdas = np.transpose(
np.array(
[
mean_stn_min_lambdas - std_stn_min_lambdas,
mean_stn_min_lambdas + std_stn_min_lambdas,
]
)
)
std_bar_bound_min_lambdas = np.transpose(
np.array(
[
mean_bound_min_lambdas - std_bound_min_lambdas,
mean_bound_min_lambdas + std_bound_min_lambdas,
]
)
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
np.array(
[
mean_bound_min_lambdas_without_delta_ineq
- std_bound_min_lambdas_without_delta_ineq,
mean_bound_min_lambdas_without_delta_ineq
+ std_bound_min_lambdas_without_delta_ineq,
]
)
)
std_bar_ihs_min_lambdas = np.transpose(
np.array(
[
mean_ihs_min_lambdas - std_ihs_min_lambdas,
mean_ihs_min_lambdas + std_ihs_min_lambdas,
]
)
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
np.array(
[
mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
]
)
)
std_bar_increases = np.transpose(
np.array([mean_increases - std_increases, mean_increases + std_increases])
)
# --- Notebook cell: overview plots (plain-text labels) for the averaged run.
kwargs = dict(
xtitle="N (matrix size)",
height=600,
calc_xticks=True,
fill_along_axis=False,
fill_only_positive=False,
)
fig = plot_stats(
mean_alphas.values,
xs=mean_alphas.index.values,
conf_intvs=std_bar_alphas,
name="Average alpha",
color="darkblue",
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
# Return value intentionally discarded — the curve is drawn onto the existing fig.
plot_stats(
mean_deltas.values,
xs=mean_deltas.index.values,
conf_intvs=std_bar_deltas,
name="Average 2-norm of delta",
color="tab:red",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_delta_over_one_plus_delta.values,
xs=mean_delta_over_one_plus_delta.index.values,
conf_intvs=std_bar_delta_over_one_plus_delta,
name="delta / (1 + delta)",
color="tab:green",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_one_over_one_pl_delta.values,
xs=mean_one_over_one_pl_delta.index.values,
conf_intvs=std_bar_one_over_one_pl_delta,
name="1 / (1 + delta)",
color="pink",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_min_eig_eye_pl_deltas.values,
xs=mean_min_eig_eye_pl_deltas.index.values,
conf_intvs=std_bar_min_eig_eye_pl_deltas,
name="min_eig_(eye_pl_delta)",
color="tab:cyan",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_min_lmb_pl_1mlmb_alphas.values,
xs=mean_min_lmb_pl_1mlmb_alphas.index.values,
conf_intvs=std_bar_min_lmb_pl_1mlmb_alphas,
name="min_lmb + (1 - min_lmb) * alphas",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
# --- Notebook cell: minimal-eigenvalue bounds on a log y-scale (averaged run).
fig = plot_stats(
mean_stn_min_lambdas.values,
xs=mean_stn_min_lambdas.index.values,
conf_intvs=std_bar_stn_min_lambdas,
name="stn_min_lambdas",
color="darkblue",
yscale="log",
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_ihs_min_lambdas_without_delta.values,
xs=mean_ihs_min_lambdas_without_delta.index.values,
conf_intvs=std_bar_ihs_min_lambdas_without_delta,
name="ihs_min_lambdas_without_delta",
color="tab:red",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_increases.values,
xs=mean_increases.index.values,
conf_intvs=std_bar_increases,
name="increases",
color="gray",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_stats(
mean_bound_min_lambdas.values,
xs=mean_bound_min_lambdas.index.values,
conf_intvs=std_bar_bound_min_lambdas,
name="bound_min_lambdas",
color="tab:green",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_bound_min_lambdas_without_delta_ineq.values,
xs=mean_bound_min_lambdas_without_delta_ineq.index.values,
conf_intvs=std_bar_bound_min_lambdas_without_delta_ineq,
name="bound_min_lambdas_without_delta_ineq",
color="tab:cyan",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
fig = plot_stats(
mean_ihs_min_lambdas.values,
xs=mean_ihs_min_lambdas.index.values,
conf_intvs=std_bar_ihs_min_lambdas,
name="ihs_min_lambdas",
color="tab:orange",
yscale="log",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
display(fig)
# --- Notebook cell: mean curves only with LaTeX labels.
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)
fig = plot_ts(
mean_min_lmb_pl_1mlmb_alphas,
# std=std_bar_min_lmb_pl_1mlmb_alphas,
name="$\lambda_{1}(R) + \\alpha(1 - \lambda_1(R))$",
color="darkblue",
height=800,
# fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_min_eig_eye_pl_deltas,
# conf_intvs=std_bar_min_eig_eye_pl_deltas,
name="$\lambda_{1}(I + \Delta)$",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_one_over_one_pl_delta,
# conf_intvs=std_bar_one_over_one_pl_delta,
name="$\dfrac{1}{1 + ||\Delta||}$",
color="tab:green",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
# --- Notebook cell: exact duplicate of the previous cell (re-run in the notebook).
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)
fig = plot_ts(
mean_min_lmb_pl_1mlmb_alphas,
# std=std_bar_min_lmb_pl_1mlmb_alphas,
name="$\lambda_{1}(R) + \\alpha(1 - \lambda_1(R))$",
color="darkblue",
height=800,
# fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_min_eig_eye_pl_deltas,
# conf_intvs=std_bar_min_eig_eye_pl_deltas,
name="$\lambda_{1}(I + \Delta)$",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_one_over_one_pl_delta,
# conf_intvs=std_bar_one_over_one_pl_delta,
name="$\dfrac{1}{1 + ||\Delta||}$",
color="tab:green",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
# --- Notebook cell: recompute the window grid and averaged statistics for nlags = 10.
nlags = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
# BUG FIX: averaged_mean_of_alphas_and_deltas returns 14 (mean, std) groups,
# but this cell originally unpacked only 12 — the two "*_minus_*" difference
# groups were omitted — so it raised "ValueError: too many values to unpack".
# The unpacking below matches the function's return order exactly (and the
# corrected duplicate cell that follows).
(
    (mean_alphas, std_alphas),
    (mean_deltas, std_deltas),
    (mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
    (mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
    (mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
    (mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
    (
        mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
        std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (
        mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
        std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
    ),
    (mean_stn_min_lambdas, std_stn_min_lambdas),
    (mean_bound_min_lambdas, std_bound_min_lambdas),
    (
        mean_bound_min_lambdas_without_delta_ineq,
        std_bound_min_lambdas_without_delta_ineq,
    ),
    (mean_ihs_min_lambdas, std_ihs_min_lambdas),
    (mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
    (mean_increases, std_increases),
) = averaged_mean_of_alphas_and_deltas(nlags)
# --- Notebook cell: corrected nlags = 10 run with the full 14-group unpacking.
nlags = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
(
(mean_alphas, std_alphas),
(mean_deltas, std_deltas),
(mean_delta_over_one_plus_delta, std_delta_over_one_plus_delta),
(mean_one_over_one_pl_delta, std_one_over_one_pl_delta),
(mean_min_eig_eye_pl_deltas, std_min_eig_eye_pl_deltas),
(mean_min_lmb_pl_1mlmb_alphas, std_min_lmb_pl_1mlmb_alphas),
(
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
),
(
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
),
(mean_stn_min_lambdas, std_stn_min_lambdas),
(mean_bound_min_lambdas, std_bound_min_lambdas),
(
mean_bound_min_lambdas_without_delta_ineq,
std_bound_min_lambdas_without_delta_ineq,
),
(mean_ihs_min_lambdas, std_ihs_min_lambdas),
(mean_ihs_min_lambdas_without_delta, std_ihs_min_lambdas_without_delta),
(mean_increases, std_increases),
) = averaged_mean_of_alphas_and_deltas(nlags)
# --- Notebook cell: [mean - std, mean + std] band endpoints for the nlags = 10 run.
std_bar_alphas = np.transpose(
np.array([mean_alphas - std_alphas, mean_alphas + std_alphas])
)
std_bar_deltas = np.transpose(
np.array([mean_deltas - std_deltas, mean_deltas + std_deltas])
)
std_bar_delta_over_one_plus_delta = np.transpose(
np.array(
[
mean_delta_over_one_plus_delta - std_delta_over_one_plus_delta,
mean_delta_over_one_plus_delta + std_delta_over_one_plus_delta,
]
)
)
std_bar_one_over_one_pl_delta = np.transpose(
np.array(
[
mean_one_over_one_pl_delta - std_one_over_one_pl_delta,
mean_one_over_one_pl_delta + std_one_over_one_pl_delta,
]
)
)
std_bar_min_eig_eye_pl_deltas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas - std_min_eig_eye_pl_deltas,
mean_min_eig_eye_pl_deltas + std_min_eig_eye_pl_deltas,
]
)
)
std_bar_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_lmb_pl_1mlmb_alphas - std_min_lmb_pl_1mlmb_alphas,
mean_min_lmb_pl_1mlmb_alphas + std_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
- std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas
+ std_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas = np.transpose(
np.array(
[
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
- std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas
+ std_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
]
)
)
std_bar_stn_min_lambdas = np.transpose(
np.array(
[
mean_stn_min_lambdas - std_stn_min_lambdas,
mean_stn_min_lambdas + std_stn_min_lambdas,
]
)
)
std_bar_bound_min_lambdas = np.transpose(
np.array(
[
mean_bound_min_lambdas - std_bound_min_lambdas,
mean_bound_min_lambdas + std_bound_min_lambdas,
]
)
)
std_bar_bound_min_lambdas_without_delta_ineq = np.transpose(
np.array(
[
mean_bound_min_lambdas_without_delta_ineq
- std_bound_min_lambdas_without_delta_ineq,
mean_bound_min_lambdas_without_delta_ineq
+ std_bound_min_lambdas_without_delta_ineq,
]
)
)
std_bar_ihs_min_lambdas = np.transpose(
np.array(
[
mean_ihs_min_lambdas - std_ihs_min_lambdas,
mean_ihs_min_lambdas + std_ihs_min_lambdas,
]
)
)
std_bar_ihs_min_lambdas_without_delta = np.transpose(
np.array(
[
mean_ihs_min_lambdas_without_delta - std_ihs_min_lambdas_without_delta,
mean_ihs_min_lambdas_without_delta + std_ihs_min_lambdas_without_delta,
]
)
)
std_bar_increases = np.transpose(
np.array([mean_increases - std_increases, mean_increases + std_increases])
)
# --- Notebook cell: compact (height=300) mean-curve figure for the nlags = 10 run.
kwargs = dict(
xtitle="h (matrix size)",
calc_xticks=True,
major_xstep=1,
height=300,
)
fig = plot_ts(
mean_min_lmb_pl_1mlmb_alphas,
# std=std_bar_min_lmb_pl_1mlmb_alphas,
name="$\lambda_{1}(R_h) + \\alpha(1 - \lambda_1(R_h))$",
color="darkblue",
# height=800,
# fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_min_eig_eye_pl_deltas,
# conf_intvs=std_bar_min_eig_eye_pl_deltas,
name="$\lambda_{1}(I_h + \Delta_h)$",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
fig = plot_ts(
mean_one_over_one_pl_delta,
# conf_intvs=std_bar_one_over_one_pl_delta,
name="$\dfrac{1}{1 + ||\Delta_h||}$",
color="tab:green",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
# --- Notebook cell: the two difference statistics with std bands.
kwargs = dict(xtitle="h (matrix size)", calc_xticks=True, major_xstep=1)
fig = plot_stats(
mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.values,
xs=mean_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas.index.values,
conf_intvs=std_bar_min_eig_eye_pl_deltas_minus_min_lmb_pl_1mlmb_alphas,
# fill_along_axis=False,
name="$\lambda_{1}(I_h + \Delta_h) - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
color="darkblue",
# conf_alpha=0.1,
height=300,
# fig=fig,
**kwargs
)
fig = plot_stats(
mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.values,
xs=mean_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas.index.values,
conf_intvs=std_bar_one_over_one_pl_delta_minus_min_lmb_pl_1mlmb_alphas,
# fill_along_axis=False,
name="$\dfrac{1}{1 + ||\Delta_h||} - \lambda_{1}(R_h) + \\alpha_h(1 - \lambda_1(R_h))$",
color="tab:orange",
# conf_alpha=0.1,
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
ax = fig.get_axes()[0]
# ax.set_ylim(bottom=0.8)
display(fig)
# --- Notebook cell: alpha_h and ||Delta||_2 with std bands (compact figure).
kwargs = dict(
xtitle="h (matrix size)",
height=300,
calc_xticks=True,
fill_along_axis=False,
fill_only_positive=False,
)
fig = plot_stats(
mean_alphas.values,
xs=mean_alphas.index.values,
conf_intvs=std_bar_alphas,
name="$\\alpha_h$",
color="darkblue",
**kwargs
)
ax_settings(fig=fig, showgrid=True)
# display(fig)
# Return value intentionally discarded — the curve is drawn onto the existing fig.
plot_stats(
mean_deltas.values,
xs=mean_deltas.index.values,
conf_intvs=std_bar_deltas,
name="$||\Delta||_2$",
color="tab:orange",
fig=fig,
**kwargs
)
ax_settings(fig=fig, showgrid=True)
display(fig)
# autocorr, circ_matrix, dft, eigenvalues and their bounds
def autocorr(ts, nlags):
    """Autocorrelation of *ts* at lags 0..nlags (lag 0 normalized to 1)."""
    autocov = acovf(ts, fft=False, nlag=nlags)
    return autocov / autocov[0]
def autocorr_matrix(ts, nlags):
    # Autocorrelation matrix of *ts*: the normalized autocovariances passed
    # through cov_matrix (defined in an earlier cell — presumably a Toeplitz
    # builder; TODO confirm).
    return cov_matrix(autocorr(ts, nlags))
def circ_matrix(first_row):
    """Return the circulant matrix whose row i is *first_row* rotated right by i."""
    row = np.asarray(first_row, dtype=float)
    return np.stack([np.roll(row, shift) for shift in range(len(row))])
def circ_matrix_from_autocorr(
    acf,
    alphas=None,
    m=None,
    len_pow_of_2=False,
):
    """Circulant embedding of an autocorrelation sequence.

    Builds a first row of length m laid out as
    [acf[0..n-1], optional alphas filler, reversed acf[1..n-1]]
    and returns the corresponding circulant matrix.

    acf -- autocorrelation values r_0..r_{n-1}.
    alphas -- optional free values for the middle segment; when given,
        m defaults to 2 * n + len(alphas) - 1 so the segments tile exactly.
    m -- explicit circulant size, overriding both defaults.
    len_pow_of_2 -- when True (and alphas is None), use m = 1 << n.bit_length().
        NOTE(review): that value can be smaller than 2 * n - 1 (e.g. n = 5
        gives m = 8), in which case the acf and reversed-acf segments overlap;
        confirm the intended rounding before relying on this branch.
    """
    n = len(acf)
    if m is None:
        if alphas is not None:
            m = 2 * n + len(alphas) - 1
        elif len_pow_of_2:
            m = 1 << n.bit_length()
        else:
            m = 2 * n
    first_row = np.zeros(m)
    first_row[:n] = acf
    if alphas is not None:
        first_row[n : m - n + 1] = alphas
    # Mirror the tail so the circulant is a symmetric embedding of the
    # Toeplitz autocorrelation matrix.
    first_row[m - n + 1 :] = np.flipud(acf[1:])
    return circ_matrix(first_row)
def dft(x):
    """Discrete Fourier Transform of a 1-D real-valued signal *x*.

    Computed via the explicit N x N kernel exp(-2j*pi*j*k/N) — O(N^2),
    intended for small inputs / didactic use.
    """
    length = len(x)
    indices = np.arange(length)
    kernel = np.exp(-2j * np.pi * np.outer(indices, indices) / length)
    return kernel @ x
def dft_matrix(n):
    """Return the n x n unnormalized DFT-style matrix X[j, k] = exp(2j*pi*j*k/n).

    Note the positive exponent — the conjugate of the kernel used by dft();
    multiplying the first row of a circulant by this matrix yields its
    eigenvalues in either convention, which is how it is used below.

    BUG FIX: the original used dtype=np.complex, an alias deprecated in
    NumPy 1.20 and removed in 1.24, which now raises AttributeError.
    The builtin ``complex`` (== np.complex128) is used instead, and the
    O(n^2) double loop is replaced by an equivalent vectorized outer product.
    """
    indices = np.arange(n)
    return np.exp((2j * np.pi / n) * np.outer(indices, indices)).astype(complex)
def sorted_eig_values(matrix):
    """Real parts of the eigenvalues of *matrix*, sorted ascending."""
    eigvals = np.linalg.eigvals(matrix)
    return np.sort(eigvals.real)
%%html
<!--hide_me_please-->
<p class="showcode" id="012345gfdcvbjtaa"><a href="javascript:code_toggle('#012345gfdcvbjt')">show codes in the notebook</a></p>
<p class="hidecode" id="012345gfdcvbjt"><a href="javascript:code_toggle('#012345gfdcvbjtaa')">hide codes in the notebook</a></p>
# https://doi.org/10.1016/S1474-6670(17)47877-3
# Ferreira's bounds on each eigenvalue of autocorrelation matrix
# based on circulant embedding
def ferreira_bounds_of_autocorr_eigvals(acf):
    """Ferreira's bounds on each eigenvalue of the autocorrelation (Toeplitz)
    matrix built from *acf*, via its circulant embedding.

    Returns a (2, n) array: row 0 = lower bounds, row 1 = upper bounds, for
    the n eigenvalues taken in ascending order.
    See https://doi.org/10.1016/S1474-6670(17)47877-3.
    """
    # First row of the circulant embedding of the Toeplitz matrix.
    c = circ_matrix_from_autocorr(acf)[0, :]
    n = len(c)
    # print(n)
    # Eigenvalues of a circulant matrix are the DFT of its first row.
    X = dft_matrix(n)
    c_eig_vals = np.real(X @ c)
    # print(c_eig_vals)
    # `n` is reused: from here on it is half the embedding size.
    n = len(c_eig_vals) // 2
    # Split the circulant spectrum by index parity, each half sorted ascending.
    odd_c_eig_vals = np.array(sorted([c_eig_vals[2 * i + 1] for i in range(n)]))
    even_c_eig_vals = np.array(sorted([c_eig_vals[2 * i] for i in range(n)]))
    bounds = np.zeros((2, n))
    for k in range(n):
        # lower
        bounds[0, k] = 0.5 * (even_c_eig_vals[k] + odd_c_eig_vals[0])
        # upper
        bounds[1, k] = 0.5 * (even_c_eig_vals[k] + odd_c_eig_vals[n - 1])
    return bounds
%%html
<!--hide_me_please-->
<p class="hidecode" id="qweertbbbbgdg123454"><a href="javascript:code_toggle('#qweertbbbbgdg123454')">hide codes in the notebook</a></p>
# ferreira's playground
acf_values = autocorr(ihs_trans_ts[5], nlags=16)
C = circ_matrix_from_autocorr(acf_values)
c = C[0, :]
print(c)
n = len(c)
print(n)
X = dft_matrix(n)
c_eig_vals = np.real(X @ c)
n = len(c_eig_vals) // 2
odd_eig_vals = np.array(sorted([c_eig_vals[2 * i + 1] for i in range(n)]))
even_eig_vals = np.array(sorted([c_eig_vals[2 * i] for i in range(n)]))
fig = plot_ts(
odd_eig_vals,
linestyle="",
marker="o",
xmargin=0.02,
)
display(fig)
fig = plot_ts(
even_eig_vals,
linestyle="",
marker="o",
xmargin=0.02,
)
display(fig)
print(f"bounds: {ferreira_bounds_of_autocorr_eigvals(acf_values)}")
T = C[:n, :n]
S = C[:n, n:]
# print(T)
# print(S)
TpS = T + S
TmS = T - S
print(f"eigvals of T + S: {sorted_eig_values(TpS)}")
print(f"eigvals of T - S: {sorted_eig_values(TmS)}")
%%html
<!--hide_me_please-->
<p class="hidecode" id="mnb34tgfddfghjkloiu"><a href="javascript:code_toggle('#mnb34tgfddfghjkloiu')">hide codes in the notebook</a></p>
C = circ_matrix_from_autocorr([1, 0.6, 0.4, -0.1, -0.2, -0.4])
print(C)
sorted_eig_values(C)
x = np.zeros(2 * n - 3)
x[: n - 1] = acf[:-1][::-1]
x[n - 2 :] = acf[:-1]
print(x)
C = circ_matrix_from_autocorr(acf, alphas=x)
print(np.min(sorted_eig_values(C)))
print(C.shape)
(C == np.transpose(C)).all()
%%html
<!--hide_me_please-->
<p class="hidecode" id="qwe34tgfddfghjkloiu"><a href="javascript:code_toggle('#qwe34tgfddfghjkloiu')">hide codes in the notebook</a></p>
# circ_matrix([1, 2, 3, 4])
# plot_eigenvalues_ferreira_stats
def plot_eigenvalues_ferreira_stats(ts_idx, nlags):
    """Plot autocorrelations and eigenvalues (with Ferreira's bounds as error
    bars) of the autocorrelation matrices of the standard- and IHS-transformed
    series at index *ts_idx*, using *nlags* lags.

    Reads the module-level series collections ``standard_trans_ts`` and
    ``ihs_trans_ts``.
    """
    n = nlags + 1  # NOTE(review): `n` is never used below — confirm removable.
    ts = standard_trans_ts[ts_idx]
    standard_acf = autocorr(ts, nlags=nlags)
    standard_eig_vals = sorted_eig_values(cov_matrix(standard_acf))
    ts = ihs_trans_ts[ts_idx]
    ihs_acf = autocorr(ts, nlags=nlags)
    ihs_eig_vals = sorted_eig_values(cov_matrix(ihs_acf))
    # Overlay both autocorrelation curves in one figure.
    fig = plot_stats(standard_acf, title="Autocorrelation", label="standard")
    plot_stats(ihs_acf, color="tab:red", label="IHS", showgrid=True, fig=fig)
    display(fig)
    kwargs = dict(
        title="Ferreira's Bounds on Eigenvalues of Autocorrelation Matrix",
        subtitle="Autocorrelation matrix of size n x n",
        xtitle="kth eigen value",
        xmargin=0.02,
        yscale="log",
    )
    fig = None
    for eig_vals, acf, ts_type, color in [
        (standard_eig_vals, standard_acf, "standard", "tab:blue"),
        (ihs_eig_vals, ihs_acf, "IHS", "tab:red"),
    ]:
        fig = plot_ts(
            eig_vals,
            color=color,
            linestyle="",
            marker="o",
            name=ts_type,
            showgrid=True,
            **kwargs,
        )
        # Subsequent plots reuse the same figure.
        kwargs = dict(fig=fig)
        ax = fig.get_axes()[0]
        yerr = ferreira_bounds_of_autocorr_eigvals(acf)
        print(f"bounds: {yerr}")
        # Convert absolute bounds (row 0 lower, row 1 upper) into error-bar
        # offsets relative to the lower bound.
        y = yerr[0, :].copy()
        yerr[1, :] -= y
        yerr[0, :] -= y
        x = np.arange(len(y))
        ax.errorbar(
            x, y, yerr, fmt="none", color=color, linewidth=2.5, capsize=10, alpha=0.5
        )
    display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="12345gfdcvbjtaa"><a href="javascript:code_toggle('#12345gfdcvbjt')">show codes in the notebook</a></p>
<p class="hidecode" id="12345gfdcvbjt"><a href="javascript:code_toggle('#12345gfdcvbjtaa')">hide codes in the notebook</a></p>
# Cell driver: Ferreira eigenvalue bounds for series 5 with 7 lags.
ts_idx = 5
nlags = 7
plot_eigenvalues_ferreira_stats(ts_idx, nlags)
%%html
<!--hide_me_please-->
<p class="showcode" id="qq12345gfdcvbjtaa"><a href="javascript:code_toggle('#qq12345gfdcvbjt')">show codes in the notebook</a></p>
<p class="hidecode" id="qq12345gfdcvbjt"><a href="javascript:code_toggle('#qq12345gfdcvbjtaa')">hide codes in the notebook</a></p>
# Cell driver: same Ferreira bounds plot, larger matrix (16 lags).
ts_idx = 5
nlags = 16
plot_eigenvalues_ferreira_stats(ts_idx, nlags)
%%html
<!--hide_me_please-->
<p class="showcode" id="rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# plot_extr_eigenvalues_bounds
def plot_extr_eigenvalues_bounds(
    ts_idx, nlags, name, bounds_fun, plot_autocorr=False, print_bounds=False
):
    """Plot actual extreme eigenvalues of the autocorrelation matrices of
    sizes 1..nlags, together with the bounds produced by *bounds_fun*, for the
    standard- and IHS-transformed series at index *ts_idx*.

    bounds_fun(acf, nlags) must return (bounds_of_max, bounds_of_min); either
    may be None, each otherwise a (2, nlags) array (row 0 lower, row 1 upper).
    *name* appears in the figure title; *print_bounds* dumps the raw bounds.
    """
    ts = standard_trans_ts[ts_idx]
    standard_acf = autocorr(ts, nlags=nlags)
    ts = ihs_trans_ts[ts_idx]
    ihs_acf = autocorr(ts, nlags=nlags)
    if plot_autocorr:
        fig = plot_stats(standard_acf, title="Autocorrelation", label="standard")
        plot_stats(ihs_acf, color="tab:red", label="IHS", showgrid=True, fig=fig)
        display(fig)
    bounds_of_max = {}
    bounds_of_min = {}
    for acf, ts_type in [
        (standard_acf, "standard"),
        (ihs_acf, "IHS"),
    ]:
        bmax, bmin = bounds_fun(acf, nlags)
        bounds_of_max[ts_type] = bmax
        bounds_of_min[ts_type] = bmin
    # NOTE(review): `bmax`/`bmin` below are leftovers from the LAST iteration
    # of the loop above; the wording logic assumes None-ness is the same for
    # both series — confirm that assumption holds for every bounds_fun.
    min_max_etr = "Extreme"
    if bmax is None:
        min_max_etr = "Minimal"
    if bmin is None:
        min_max_etr = "Maximal"
    fig = None
    kwargs = dict(
        title=f"{name}'s Bounds on {min_max_etr} Eigenvalues of Autocorrelation Matrix",
        calc_xticks=True,
        major_xstep=np.ceil(nlags / 30),
        xmargin=0.02,
        yscale="log",
        xtitle="lags (matrix of size: lags * lags)",
    )
    for acf, ts_type, color in [
        (standard_acf, "standard", "tab:blue"),
        (ihs_acf, "IHS", "tab:red"),
    ]:
        # Exact extreme eigenvalues for every leading principal submatrix.
        min_eigvals = np.zeros(nlags)
        max_eigvals = np.zeros(nlags)
        for i, lags in enumerate(range(1, nlags + 1)):
            # print(f"acf: {acf[:lags]}")
            eigvals = sorted_eig_values(cov_matrix(acf[:lags]))
            min_eigvals[i] = np.min(eigvals)
            max_eigvals[i] = np.max(eigvals)
        # print(f"min_eigvals: {min_eigvals}")
        # print(f"max_eigvals: {max_eigvals}")
        xs = np.arange(1, nlags + 1)
        # NOTE(review): these None checks use `bmin`/`bmax` from the PREVIOUS
        # iteration (reassigned further down) — order-sensitive; confirm.
        if bmin is not None:
            fig = plot_ts(
                min_eigvals,
                index_values=xs,
                color=color,
                linestyle="",
                marker="o",
                name=ts_type,
                showgrid=True,
                **kwargs,
            )
            kwargs = dict(fig=fig)
        if bmax is not None:
            fig = plot_ts(
                max_eigvals,
                index_values=xs,
                color=color,
                linestyle="",
                marker="o",
                showgrid=True,
                **kwargs,
            )
            kwargs = dict(fig=fig)
        ax = fig.get_axes()[0]
        bmax = bounds_of_max[ts_type]
        bmin = bounds_of_min[ts_type]
        if print_bounds:
            print(f"bounds_of_max: {bmax}")
            print(f"bounds_of_min: {bmin}")
        for bounds in [bmax, bmin]:
            if bounds is not None:
                # Error bars: offsets relative to the lower bound (row 0).
                yerr = bounds.copy()
                y = yerr[0, :].copy()
                yerr[1, :] -= y
                yerr[0, :] -= y
                ax.errorbar(
                    xs,
                    y,
                    yerr,
                    fmt="none",
                    color=color,
                    linewidth=2.5,
                    capsize=10,
                    alpha=0.5,
                )
    display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="765rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#765rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="765rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#765rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# Dembo's bounds on extreme eigenvalues of autocorrelation matrices
# of size p x p for p = 1, 2, ..., max_nlags
# https://doi.org/10.1109/18.2651
def dembo_bounds_of_autocorr_extr_eigvals(acf, max_nlags):
    """Dembo's recursive bounds on the extreme eigenvalues of the
    autocorrelation matrices of sizes p = 1..max_nlags
    (https://doi.org/10.1109/18.2651).

    Returns (bounds_of_max, bounds_of_min); each is a (2, max_nlags) array
    whose row 0 holds lower and row 1 upper bounds; column i corresponds to
    the matrix of size p = i + 1.
    """
    t0 = acf[0]
    bounds_of_max = np.full((2, max_nlags), fill_value=t0)
    bounds_of_min = np.full((2, max_nlags), fill_value=t0)

    def _center_radius(eta, b_norm_sq):
        # Shared sub-expression of Dembo's recursion for a previous bound eta.
        center = (t0 + eta) / 2
        radius = np.sqrt((t0 - eta) ** 2 / 4 + b_norm_sq)
        return center, radius

    for p in range(2, max_nlags + 1):
        col = p - 1
        eta_low = bounds_of_min[0, col - 1]   # previous lower bound on min eigval
        eta_high = bounds_of_max[1, col - 1]  # previous upper bound on max eigval
        b = acf[1:p]
        b_norm_sq = np.dot(b, b)
        center_low, radius_low = _center_radius(eta_low, b_norm_sq)
        center_high, radius_high = _center_radius(eta_high, b_norm_sq)
        bounds_of_max[0, col] = center_low + radius_low     # lower bound on max
        bounds_of_max[1, col] = center_high + radius_high   # upper bound on max
        bounds_of_min[0, col] = center_low - radius_low     # lower bound on min
        bounds_of_min[1, col] = center_high - radius_high   # upper bound on min
    return bounds_of_max, bounds_of_min
# Cell driver: Dembo bounds on extreme eigenvalues for series 5, sizes up to 15.
ts_idx = 5
nlags = 15
plot_extr_eigenvalues_bounds(
    ts_idx, nlags, "Dembo", dembo_bounds_of_autocorr_extr_eigvals, plot_autocorr=True
)
%%html
<!--hide_me_please-->
<p id="1234765dfghnbrtyaa"></p>
<p class="hidecode" id="1234765dfghnbrty"><a href="javascript:code_toggle('#1234765dfghnbrtyaa')">hide codes in the notebook</a></p>
# Ma and Zarowski's bounds on minimal eigenvalues of autocorrelation matrices
# of size p x p for p = 1, 2, ..., max_nlags
# https://doi.org/10.1109/TIT.2008.928966
def ma_and_zarowski_bounds_of_autocorr_min_eigvals(acf, max_nlags):
    """Ma & Zarowski's recursive bounds on the minimal eigenvalue of the
    autocorrelation matrices of sizes p = 1..max_nlags
    (https://doi.org/10.1109/TIT.2008.928966).

    Returns (None, bounds_of_min): no bound on the maximal eigenvalue;
    bounds_of_min is a (2, max_nlags) array with row 0 = lower and
    row 1 = upper bounds, column i corresponding to matrix size p = i + 1.
    """
    t0 = acf[0]
    bounds_of_max = None
    bounds_of_min = np.full((2, max_nlags), fill_value=t0)
    # NOTE(review): this nudge keeps the initial upper bound strictly above
    # the lower one — presumably to avoid a degenerate recursion start;
    # confirm against the paper.
    bounds_of_min[1, 0] += 1e-5
    for p in range(2, max_nlags + 1):
        i = p - 1
        R_nm1 = cov_matrix(acf[: p - 1])
        R_n = cov_matrix(acf[:p])
        eta_1 = bounds_of_min[0, i - 1]
        eta_pm1 = bounds_of_min[1, i - 1]
        t0_plus_eta1 = t0 + eta_1
        t0_plus_etapm1 = t0 + eta_pm1
        # d1 = det(R_p) / det(R_{p-1}); the commented lines below are the
        # equivalent Schur-complement formulation.
        d1 = np.linalg.det(R_n) / np.linalg.det(R_nm1)
        # R_nm1_inv1 = np.linalg.inv(cov_matrix(acf[: p - 1]))
        # b = np.flipud(acf[1:p])
        # d1 = t0 - b @ R_nm1_inv1 @ b
        sq_t0_plus_eta1_m_4_d1_eta1 = np.sqrt((t0_plus_eta1 ** 2) - 4 * d1 * eta_1)
        sq_t0_plus_etapm1_m_4_d1_etapm1 = np.sqrt(
            (t0_plus_etapm1 ** 2) - 4 * d1 * eta_pm1
        )
        # lower
        bounds_of_min[0, i] = 0.5 * (t0_plus_eta1 - sq_t0_plus_eta1_m_4_d1_eta1)
        # upper
        bounds_of_min[1, i] = 0.5 * (t0_plus_etapm1 + sq_t0_plus_etapm1_m_4_d1_etapm1)
    return bounds_of_max, bounds_of_min
%%html
<!--hide_me_please-->
<p id="12378asdfghtrhnbrtyaa"></p>
<p class="hidecode" id="12378asdfghtrhnbrty"><a href="javascript:code_toggle('#12378asdfghtrhnbrtyaa')">hide codes in the notebook</a></p>
# Cell driver: Ma & Zarowski bounds on minimal eigenvalues, series 5, sizes up to 20.
ts_idx = 5
nlags = 20
plot_extr_eigenvalues_bounds(
    ts_idx,
    nlags,
    "Ma and Zarowski",
    ma_and_zarowski_bounds_of_autocorr_min_eigvals,
    plot_autocorr=True,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="b12rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#b12rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="b12rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#b12rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
%%html
<!--hide_me_please-->
<script>initialize();</script>
<p class="showcode" id="fnitohaefw4343aa"><a href="javascript:code_toggle('#fnitohaefw4343')">show codes in the notebook</a></p>
<p class="hidecode" id="fnitohaefw4343"><a href="javascript:code_toggle('#fnitohaefw4343aa')">hide codes in the notebook</a></p>
# # Sun's bounds on minimal eigenvalues of autocorrelation matrices
# # of size p x p for p = 1, 2, ..., max_nlags
# # https://doi.org/10.1109/18.887894
def sun_bounds_of_autocorr_min_eigvals(acf, max_nlags):
    """Sun's recursive bounds on the minimal eigenvalue of the autocorrelation
    matrices of sizes p = 1..max_nlags (https://doi.org/10.1109/18.887894).

    Returns (None, bounds_of_min): no bound on the maximal eigenvalue;
    bounds_of_min is a (2, max_nlags) array with row 0 = lower and
    row 1 = upper bounds, column i corresponding to matrix size p = i + 1.
    """
    t0 = acf[0]
    bounds_of_max = None
    bounds_of_min = np.full((2, max_nlags), fill_value=t0)
    for p in range(2, max_nlags + 1):
        i = p - 1
        # First three powers of the inverse autocorrelation matrix of size p-1.
        R_nm1_inv1 = np.linalg.inv(cov_matrix(acf[: p - 1]))
        R_nm1_inv2 = R_nm1_inv1 @ R_nm1_inv1
        R_nm1_inv3 = R_nm1_inv2 @ R_nm1_inv1
        eta_1 = bounds_of_min[0, i - 1]
        eta_pm1 = bounds_of_min[1, i - 1]
        # print(f"eta1={eta_1}, eta_pm1={eta_pm1}")
        b = np.flipud(acf[1:p])
        d1 = t0 - b @ R_nm1_inv1 @ b
        d2 = 1 + b @ R_nm1_inv2 @ b
        d3eta1 = d2 - b @ R_nm1_inv3 @ b * eta_1
        d3etapm1 = d2 - b @ R_nm1_inv3 @ b * eta_pm1
        d1_d2eta1 = d1 + d2 * eta_1
        d1_d2etapm1 = d1 + d2 * eta_pm1
        sq_d1_plus_d2eta1_m_4d1d3eta1 = np.sqrt(
            (d1_d2eta1 ** 2) - 4 * d1 * d3eta1 * eta_1
        )
        sq_d1_plus_d2etapm1_m_4d1d3etapm1 = np.sqrt(
            (d1_d2etapm1 ** 2) - 4 * d1 * d3etapm1 * eta_pm1
        )
        # lower
        bounds_of_min[0, i] = 0.5 * (d1_d2eta1 - sq_d1_plus_d2eta1_m_4d1d3eta1) / d3eta1
        # upper
        # NOTE(review): the extra `+ 0.1 * eta_pm1` term looks like an ad-hoc
        # inflation of the upper bound rather than part of the published
        # formula — confirm against the paper.
        bounds_of_min[1, i] = (
            0.5 * (d1_d2etapm1 - sq_d1_plus_d2etapm1_m_4d1d3etapm1) / d3etapm1
        ) + 0.1 * eta_pm1
    return bounds_of_max, bounds_of_min
%%html
<!--hide_me_please-->
<p id="asdfghtrhnbrtyaa"></p>
<p class="hidecode" id="asdfghtrhnbrty"><a href="javascript:code_toggle('#asdfghtrhnbrtyaa')">hide codes in the notebook</a></p>
# Cell driver: Sun bounds on minimal eigenvalues for series 5, sizes up to 30.
ts_idx = 5
nlags = 30
plot_extr_eigenvalues_bounds(
    ts_idx, nlags, "Sun", sun_bounds_of_autocorr_min_eigvals, plot_autocorr=True
)
%%html
<!--hide_me_please-->
<p class="showcode" id="ytght12rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#ytght12rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="ytght12rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#ytght12rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
%%html
<!--hide_me_please-->
<p id="qwefdcv12345gfderthbvbvaa"></p>
<p class="hidecode" id="qwefdcv12345gfderthbvbv"><a href="javascript:code_toggle('#qwefdcv12345gfderthbvbvaa')">hide codes in the notebook</a></p>
# plot_mean_det
def plot_mean_det(begins, ends, title=None, subtitle=None, p=1, nlags=20):
    """Plot, for both transformed series, the mean (over the given intervals)
    determinant of the lag x lag autocorrelation matrix, for lag = 1..nlags.

    begins, ends : sequences of interval boundaries (paired with zip).
    p : unused — kept for interface compatibility; TODO confirm removable.

    NOTE(review): the series index comes from the module-level ``ts_idx``,
    not a parameter — confirm intended.
    """
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    if title is None:
        title = f"Mean Determinant"
    kwargs = dict(
        title=title,
        subtitle=subtitle,
        calc_xticks=True,
        yscale="log",
        xtitle="lag (matrix of size: lag x lag)",
    )
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standard", "tab:blue"),
        (ihs_trans_ts, "IHS", "tab:red"),
    ]:
        mean_dets = np.zeros(nlags)
        std_dets = np.zeros(nlags)
        for lags in np.arange(1, nlags + 1):
            dets = []
            for begin, end in zip(begins, ends):
                # Normalized autocovariance of this interval, truncated to `lags`.
                acov = acovf(ts[ts_idx][begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                dets.append(np.linalg.det(mcov))
            mean_dets[lags - 1] = np.mean(dets)
            std_dets[lags - 1] = np.std(dets)
        fig = plot_stats(
            mean_dets,
            xs=np.arange(1, nlags + 1),
            std=std_dets[0:],
            std_xs=np.arange(1, nlags + 1),
            fill_along_axis=False,
            fill_only_positive=False,
            label=ts_name,
            color=color,
            **kwargs,
        )
        # Subsequent plots draw into the same figure.
        kwargs = dict(fig=fig)
    ax_settings(fig=fig, showgrid=True)
    display(fig)
%%html
<!--hide_me_please-->
<p id="mnbvc2343rfsfsfsfsaa"></p>
<p class="hidecode" id="mnbvc2343rfsfsfsfs"><a href="javascript:code_toggle('#mnbvc2343rfsfsfsfsaa')">hide codes in the notebook</a></p>
# Mean Determinant
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2
plot_mean_det(
begins,
ends,
subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
p=p,
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p id="1qwefdcv12345gfderthbvbvaa"></p>
<p class="hidecode" id="1qwefdcv12345gfderthbvbv"><a href="javascript:code_toggle('#1qwefdcv12345gfderthbvbvaa')">hide codes in the notebook</a></p>
# plot_mean_det_ratios
def plot_mean_det_ratios(begins, ends, title=None, subtitle=None, p=1, nlags=20):
    """Plot, for both transformed series, the mean (over the given intervals)
    ratio det(R_p) / det(R_{p-1}) of autocorrelation-matrix determinants, for
    p = 2..nlags.

    begins, ends : sequences of interval boundaries (paired with zip).
    p : unused — kept for interface compatibility; TODO confirm removable.

    Fixes: default title typo "Mean Ration" -> "Mean Ratio".

    NOTE(review): the series index comes from the module-level ``ts_idx``,
    not a parameter — confirm intended.
    """
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    if title is None:
        title = "Mean Ratio det(R_p) / det(R_{p-1})"
    kwargs = dict(
        title=title,
        subtitle=subtitle,
        calc_xticks=True,
        yscale="log",
        xtitle="lag (matrix of size: lag x lag)",
    )
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standard", "tab:blue"),
        (ihs_trans_ts, "IHS", "tab:red"),
    ]:
        mean_det_rs = np.zeros(nlags - 1)
        std_dets_rs = np.zeros(nlags - 1)
        for lags in np.arange(2, nlags + 1):
            dets = []
            for begin, end in zip(begins, ends):
                # Normalized autocovariance of this interval, truncated to `lags`.
                acov = acovf(ts[ts_idx][begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                mcov_pm1 = cov_matrix(acov[:-1])
                dets.append(np.linalg.det(mcov) / np.linalg.det(mcov_pm1))
            mean_det_rs[lags - 2] = np.mean(dets)
            std_dets_rs[lags - 2] = np.std(dets)
        fig = plot_stats(
            mean_det_rs,
            xs=np.arange(2, nlags + 1),
            std=std_dets_rs[0:],
            std_xs=np.arange(2, nlags + 1),
            fill_along_axis=False,
            fill_only_positive=False,
            label=ts_name,
            color=color,
            **kwargs,
        )
        # Subsequent plots draw into the same figure.
        kwargs = dict(fig=fig)
    ax_settings(fig=fig, showgrid=True)
    display(fig)
%%html
<!--hide_me_please-->
<p id="1mnbvc2343rfsfsfsfsaa"></p>
<p class="hidecode" id="1mnbvc2343rfsfsfsfs"><a href="javascript:code_toggle('#1mnbvc2343rfsfsfsfsaa')">hide codes in the notebook</a></p>
# Mean Determinant
alpha = 0.05
nlags = 60
ts_idx = 10
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
p = 2
plot_mean_det_ratios(
begins,
ends,
subtitle=f"{len(begins)} intervals of length {ts_len} from the same EEG signal (ts[{ts_idx}])",
p=p,
nlags=nlags,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="12rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#12rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="12rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#12rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
def ar_coeffs(x, p, method):
    """Estimate AR(p) coefficients of the series *x*.

    method : either the module-level ``ols`` marker (fit by ordinary least
        squares on lagged regressors with an intercept) or a PACF method name
        accepted by ``pacf`` (coefficients then come from ``ar_from_pacf``).

    Returns an array of p AR coefficients (intercept excluded).
    """
    if method == ols:
        xlags, x0 = lagmat(x, p, original="sep", trim="both")
        xlags = add_constant(xlags)
        # NOTE(review): rows are additionally trimmed with [p:] although
        # trim="both" already dropped incomplete rows — confirm intended.
        params = lstsq(xlags[p:, : p + 1], x0[p:], rcond=None)[0]
        ar_coeffs = params[1:]  # drop the intercept term
    else:
        pacf_values = pacf(x, method=method, nlags=p)
        ar_coeffs, _ = ar_from_pacf(pacf_values, nlags=p)
    return ar_coeffs
# plot_ar_for_intervals
def plot_ar_for_intervals(ts, ts_type, begins, ends, ps, method, p, **kwargs):
    """Plot AR(p) coefficients of *ts* estimated on each interval [begin, end).

    Each interval in ``zip(begins, ends)`` yields one curve; ``ps`` (values in
    [0, 1]) shades the green channel so that curves are distinguishable.
    Extra keyword arguments are forwarded to ``plot_stats`` (callers pass
    ``ax=...`` to direct all curves onto one axis).

    Returns the last figure produced by ``plot_stats``, or None if no interval
    was plotted.

    Fixes: the original referenced an undefined local ``fig`` (NameError
    unless a stale notebook global happened to exist) and discarded the
    ``plot_stats`` return value; the figure is now captured and returned.
    ``ts_type`` is unused — kept for interface compatibility.
    """
    fig = None
    for begin, end, s in zip(begins, ends, ps):
        color = (0, 1 - s * 0.5, 0)
        values = ar_coeffs(ts[begin:end], p, method)
        fig = plot_stats(
            values,
            label=f"on [{begin},{end-1}]",
            color=color,
            **kwargs,
        )
    if fig is not None:
        return fig
def plot_ar_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    intervals_type,
    methods,
    p,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    """For each estimation method, draw one figure with a subplot per
    (series, label) pair in *ts_list*, showing AR(p) coefficients computed on
    each interval in ``zip(begins, ends)``.

    ps : shading weights forwarded to ``plot_ar_for_intervals``.
    intervals_type : text describing the intervals, used in default titles.
    title, subtitle : when given, override the per-method defaults.
    Extra keyword arguments are forwarded to ``plot_ar_for_intervals``.

    Reads the module-level ``methods_descrs`` mapping for subtitle text.
    """
    # Remember whether the caller fixed the labels, since the loop variables
    # below overwrite `title`/`subtitle` otherwise.
    const_title = title is not None
    const_subtitle = subtitle is not None
    for method in methods:
        method_descr = methods_descrs[method]
        if not const_title:
            title = f"{method.upper()} AR({p}) Coeffs Computed on {intervals_type}"
        fig, axs = fig_with_vertical_subplots(
            len(ts_list),
            title=title,
            subplots_titles=True,
            ax_height=450,
            fontsize=15,
        )
        for (ts, ts_type), ax in zip(ts_list, axs):
            if not const_subtitle:
                subtitle = f"{method_descr} for {ts_type}"
            plot_ar_for_intervals(
                ts,
                ts_type,
                begins,
                ends,
                ps,
                method,
                p,
                ax=ax,
                subtitle=subtitle,
                **kwargs,
            )
        display(fig)
%%html
<!--hide_me_please-->
<p id="qw1234765dfghnbrtyaa"></p>
<p class="hidecode" id="qw1234765dfghnbrty"><a href="javascript:code_toggle('#qw1234765dfghnbrtyaa')">hide codes in the notebook</a></p>
# ld-biased ar experiments
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_ar_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ld_biased],
p=20,
ts_list=[
(standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
showgrid=False,
)
%%html
<!--hide_me_please-->
<p class="showcode" id="hgfds45ygfsdfghjtraa"><a href="javascript:code_toggle('#hgfds45ygfsdfghjtr')">show codes in the notebook</a></p>
<p class="hidecode" id="hgfds45ygfsdfghjtr"><a href="javascript:code_toggle('#hgfds45ygfsdfghjtraa')">hide codes in the notebook</a></p>
# ols ar experiments
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_ar_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ols],
p=20,
ts_list=[
(standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
)
%%html
<!--hide_me_please-->
<p class="showcode" id="1rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#1rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="1rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#1rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# ar_means_and_stds
def ar_means_and_stds(
    ts,
    begins,
    ends,
    method,
    p=20,
    relative=False,
    relative_to_abs_sum=False,
    margin=None,
):
    """Mean and standard deviation (over intervals) of AR(p) coefficients.

    ts : full series; an AR(p) model is fitted on each ts[begin:end].
    method : estimation method forwarded to ``ar_coeffs``.
    relative : if True, std is expressed in percent of the mean (std * 100 / mu).
    relative_to_abs_sum : if True, std is divided by the per-lag sum of
        absolute coefficient values across intervals.
    margin : if "std", mean entries with |mu| < std are replaced by NaN;
        otherwise treated as a numeric threshold with the same effect.

    Returns (mu, std), both arrays of length p.
    """
    n = len(begins)
    assert len(ends) == n
    ar_coeffs_values = np.zeros((n, p), dtype=float)
    for i, (begin, end) in enumerate(zip(begins, ends)):
        res = ar_coeffs(ts[begin:end], method=method, p=p)
        ar_coeffs_values[i, :] = res
    mu = np.mean(ar_coeffs_values, 0)
    std = np.std(ar_coeffs_values, 0)
    if margin is not None:
        if margin == "std":
            margin = std
        # Suppress statistically insignificant means.
        mu[np.abs(mu) < margin] = np.nan
    if relative:
        # NOTE(review): division by mu yields NaN/inf where mu is 0 or was
        # NaN-ed out above — confirm downstream plots tolerate that.
        std *= 100 / mu
    if relative_to_abs_sum:
        std /= np.sum(np.abs(ar_coeffs_values), 0)
    return mu, std
# Mean AR(p) Coefficients With Standard Deviations at Lags
for method in [ld_biased, ols]:
ts_idx = 5
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mu_standard, std_standard = ar_means_and_stds(
standard_trans_ts[ts_idx], begins, ends, method=method, p=p
)
std_bar_standard = np.transpose(
np.array([mu_standard - std_standard, mu_standard + std_standard])
)
mu_ihs, std_ihs = ar_means_and_stds(
ihs_trans_ts[ts_idx], begins, ends, method=method, p=p
)
std_bar_ihs = np.transpose(np.array([mu_ihs - std_ihs, mu_ihs + std_ihs]))
index_values = np.arange(1, p + 1)
fig = plot_stats(
mu_standard,
xs=index_values,
conf_intvs=std_bar_standard,
label="standard",
title=f"Mean AR({p}) Coefficients With Standard Deviations",
subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
showgrid=True,
ax_height=300,
calc_xticks=True,
# major_xticks_loc=index_values,
)
display(fig)
fig = plot_stats(
mu_ihs,
xs=index_values,
conf_intvs=std_bar_ihs,
color="tab:red",
label="IHS",
title=f"Mean AR({p}) Coefficients With Standard Deviations",
subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
showgrid=True,
ax_height=300,
calc_xticks=True,
# major_xticks_loc=index_values,
)
# fig.get_axes()
display(fig)
# Mean AR(p) Coefficients With Standard Deviations at Lags
for method in [ld_biased, ols]:
ts_idx = 5
p = 50
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mu_standard, std_standard = ar_means_and_stds(
standard_trans_ts[ts_idx], begins, ends, method=method, p=p
)
std_bar_standard = np.transpose(
np.array([mu_standard - std_standard, mu_standard + std_standard])
)
mu_ihs, std_ihs = ar_means_and_stds(
ihs_trans_ts[ts_idx], begins, ends, method=method, p=p
)
std_bar_ihs = np.transpose(np.array([mu_ihs - std_ihs, mu_ihs + std_ihs]))
index_values = np.arange(1, p + 1)
fig = plot_stats(
mu_standard,
xs=index_values,
conf_intvs=std_bar_standard,
label="standardized",
title=f"Mean AR({p}) Coefficients With Standard Deviations",
subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
showgrid=True,
ax_height=300,
calc_xticks=True,
)
display(fig)
fig = plot_stats(
mu_ihs,
xs=index_values,
conf_intvs=std_bar_ihs,
color="tab:orange",
label="IHS-normalized",
title=f"Mean AR({p}) Coefficients With Standard Deviations",
subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
showgrid=True,
ax_height=300,
calc_xticks=True,
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="12rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#12rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="12rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#12rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# mean standard deviation of nth coefficients AR(p)
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_std(tss, method):
mean_std_arr = np.zeros(p)
for ts in tss:
_, std = ar_means_and_stds(
ts, begins, ends, method=method, p=p, relative_to_abs_sum=True
)
mean_std_arr += std
return mean_std_arr / len(tss)
kwargs = {}
for method, color in zip([ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]):
mean_std_standard = mean_std(standard_trans_ts, method=method)
mean_std_ihs = mean_std(ihs_trans_ts, method=method)
fig = plot_ts(
mean_std_standard,
color=color,
linestyle="-",
marker="o",
name=f"{method} method on standard",
title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="lag",
height=800,
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
color=color,
linestyle="--",
marker="o",
name=f"{method} method on IHS",
**kwargs,
)
display(fig)
# mean standard deviation of nth coefficients AR(p)
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_std(tss, method):
mean_std_arr = np.zeros(p)
for ts in tss:
_, std = ar_means_and_stds(
ts, begins, ends, method=method, p=p, relative_to_abs_sum=True
)
mean_std_arr += std
return mean_std_arr / len(tss)
kwargs = dict(
title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="lag",
ax_height=300,
calc_xticks=True,
)
pacf_method = ld_biased
mean_std_standard = mean_std(standard_trans_ts, method=pacf_method)
mean_std_ihs = mean_std(ihs_trans_ts, method=pacf_method)
fig = plot_ts(
mean_std_standard,
color="darkblue",
linestyle="-",
marker="o",
name=f"standardized",
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
color="tab:orange",
linestyle="-",
marker="o",
name=f"IHS-normalized",
**kwargs,
)
display(fig)
# mean standard deviation of nth coefficients AR(p)
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_std(tss, method):
mean_std_arr = np.zeros(p)
for ts in tss:
_, std = ar_means_and_stds(
ts, begins, ends, method=method, p=p, relative=True, margin="std"
)
mean_std_arr += np.abs(std)
return mean_std_arr / len(tss)
kwargs = dict(
title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="lag",
ax_height=300,
calc_xticks=True,
# yscale="log",
xmargin=0.02,
)
pacf_method = ld_biased
mean_std_standard = mean_std(standard_trans_ts, method=pacf_method)
mean_std_ihs = mean_std(ihs_trans_ts, method=pacf_method)
index_values = np.arange(1, p + 1)
fig = plot_ts(
mean_std_standard,
index_values=index_values,
color="darkblue",
linestyle="",
marker="o",
name=f"{pacf_methods_names[pacf_method]} method for standardized",
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
index_values=index_values,
color="tab:orange",
linestyle="",
marker="o",
name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
**kwargs,
)
# ax = fig.get_axes()
# ax.set_y
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="123rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#123rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="123rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#123rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# ar_one_step_pred
def ar_one_step_pred(x, y, ar_coeffs, rand_std=None):
    """Fill *y* in place with one-step-ahead AR predictions of *x*.

    The first ``p`` entries of *y* are copied from *x*; for i >= p,
    ``y[i]`` is the dot product of ``ar_coeffs`` with the preceding window
    ``x[i-p:i]`` (most recent sample weighted by ``ar_coeffs[0]``).

    If rand_std is not None, then a random error of N(0,rand_std^2) is added to each predicted value.
    """
    total = len(x)
    reversed_coeffs = np.flipud(ar_coeffs)
    order = len(reversed_coeffs)
    y[:order] = x[:order]
    for idx in range(order, total):
        prediction = np.dot(x[idx - order : idx], reversed_coeffs)
        if rand_std is not None:
            prediction += float(np.random.normal(0, rand_std, 1))
        y[idx] = prediction
# ar_fit_stats
def ar_fit_stats(add_errors=False):
    """Fit AR(p) models (one per PACF method) on the first ``n`` samples of
    the standard- and IHS-transformed series ``ts_idx``, reconstruct the
    series by one-step prediction, refit on the reconstruction, and print
    sorted error stats comparing (a) x vs the predicted y, (b) the two PACF
    vectors, and (c) the two AR coefficient vectors.

    Reads the module-level globals ``n``, ``ts_idx`` and ``p``.
    ``add_errors`` is currently unused — NOTE(review): confirm intended.
    """
    # scorings_names = ["mae", "mape", "mase"]
    scorings_names = ["mae"]
    # pacf_methods = [ols, burg, ld_biased]
    pacf_methods = [ols, ld_biased]
    pred_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    ar_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    pacf_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    for ts, ts_type in zip(
        [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
        ["standard", "IHS"],
    ):
        for pacf_method in pacf_methods:
            pacf_values = pacf(ts[:n], method=pacf_method, nlags=p)
            # NOTE(review): this local shadows the module-level ``ar_coeffs``
            # function within this scope.
            ar_coeffs, _ = ar_from_pacf(pacf_values, nlags=p)
            x = ts.values
            y = np.zeros_like(x)
            ar_one_step_pred(x, y, ar_coeffs)
            # Re-standardize the prediction before refitting.
            y = (y - np.mean(y)) / np.std(y)
            pacf_values_y = pacf(y[-n:], method=pacf_method, nlags=p)
            ar_coeffs_y, _ = ar_from_pacf(pacf_values_y, nlags=p)
            for scoring_name in scorings_names:
                scoring = get_scoring(scoring_name)
                model_name = f"{pacf_method} AR({p}) of {ts_type}"
                pred_diffs_errors[scoring_name].append(
                    (np.round(scoring(x, y), 3), model_name)
                )
                ar_diffs_errors[scoring_name].append(
                    (np.round(scoring(ar_coeffs, ar_coeffs_y), 3), model_name)
                )
                pacf_diffs_errors[scoring_name].append(
                    (np.round(scoring(pacf_values, pacf_values_y), 3), model_name)
                )
    for stats, stats_name, in zip(
        [pred_diffs_errors, pacf_diffs_errors, ar_diffs_errors],
        [
            "Differences between x and predicted y",
            "Differences between pacf_x and pacf_y",
            "Differences between AR coeffs of x and y",
        ],
    ):
        print(f"{stats_name}:")
        for scoring_name in scorings_names:
            stats[scoring_name] = sorted(stats[scoring_name])
            print(f"  {scoring_name}:")
            for res, name in stats[scoring_name]:
                print(f"    {res} – {name}")
# Cell driver: AR(16) fit-consistency stats on the first 1000 samples of series 5.
n = 1000
ts_idx = 5
p = 16
ar_fit_stats()
# ar_fit_stats
def ar_fit_stats(add_errors=False):
    """Redefinition of ``ar_fit_stats`` with more scorings (mae/mse/mape/mase)
    and the Burg PACF method enabled; otherwise identical to the version
    defined earlier in the file.

    Reads the module-level globals ``n``, ``ts_idx`` and ``p``.
    ``add_errors`` is currently unused — NOTE(review): confirm intended.
    """
    scorings_names = ["mae", "mse", "mape", "mase"]
    # scorings_names = ["mae"]
    pacf_methods = [ols, burg, ld_biased]
    # pacf_methods = [ols, ld_biased]
    pred_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    ar_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    pacf_diffs_errors = {scoring_name: [] for scoring_name in scorings_names}
    for ts, ts_type in zip(
        [standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
        ["standard", "IHS"],
    ):
        for pacf_method in pacf_methods:
            pacf_values = pacf(ts[:n], method=pacf_method, nlags=p)
            # NOTE(review): this local shadows the module-level ``ar_coeffs``
            # function within this scope.
            ar_coeffs, _ = ar_from_pacf(pacf_values, nlags=p)
            x = ts.values
            y = np.zeros_like(x)
            ar_one_step_pred(x, y, ar_coeffs)
            # Re-standardize the prediction before refitting.
            y = (y - np.mean(y)) / np.std(y)
            pacf_values_y = pacf(y[-n:], method=pacf_method, nlags=p)
            ar_coeffs_y, _ = ar_from_pacf(pacf_values_y, nlags=p)
            for scoring_name in scorings_names:
                scoring = get_scoring(scoring_name)
                model_name = f"{pacf_method} AR({p}) of {ts_type}"
                pred_diffs_errors[scoring_name].append(
                    (np.round(scoring(x, y), 3), model_name)
                )
                ar_diffs_errors[scoring_name].append(
                    (np.round(scoring(ar_coeffs, ar_coeffs_y), 3), model_name)
                )
                pacf_diffs_errors[scoring_name].append(
                    (np.round(scoring(pacf_values, pacf_values_y), 3), model_name)
                )
    for stats, stats_name, in zip(
        [pred_diffs_errors, pacf_diffs_errors, ar_diffs_errors],
        [
            "Differences between x and predicted y",
            "Differences between pacf_x and pacf_y",
            "Differences between AR coeffs of x and y",
        ],
    ):
        print(f"{stats_name}:")
        for scoring_name in scorings_names:
            stats[scoring_name] = sorted(stats[scoring_name])
            print(f"  {scoring_name}:")
            for res, name in stats[scoring_name]:
                print(f"    {res} – {name}")
# Cell driver: AR(25) fit-consistency stats on the first 1000 samples of series 5.
n = 1000
ts_idx = 5
p = 25
ar_fit_stats()
%%html
<!--hide_me_please-->
<p class="showcode" id="1234rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#1234rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="1234rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#1234rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
def dft(x):
    """
    Return the discrete Fourier transform of a 1D signal ``x``.

    Computes ``X[k] = sum_n x[n] * exp(-2j*pi*k*n/N)`` — the same
    convention as the previous explicit matrix formulation — but via the
    FFT. The matrix version was O(N^2) time and allocated an N x N
    complex matrix (~1 GB for the ~8000-sample EEG signals this notebook
    feeds in); ``np.fft.fft`` produces identical values in O(N log N)
    with O(N) memory.

    Parameters
    ----------
    x : array-like
        1D real- (or complex-) valued signal.

    Returns
    -------
    numpy.ndarray of complex
        DFT coefficients, same length as ``x``.
    """
    return np.fft.fft(np.asarray(x))
# psd_dft_plots
# Plot the DFT-based power spectral density of one EEG signal (raw and
# smoothed, the smoothed one also on a log scale) for the standardized and
# the IHS-normalized transform. The kwargsN dicts are rebound to
# dict(fig=figN) after the first iteration so the second series is drawn
# into the same three figures.
kwargs1 = dict(
xtitle="Freq (Hz)",
ytitle="Amplitude",
title="Power Spectral Density by DFT",
)
kwargs2 = dict(
xtitle="Freq (Hz)",
ytitle="Amplitude",
title="Smoothed Power Spectral Density by DFT",
)
kwargs3 = dict(
xtitle="Freq (Hz)",
ytitle="Amplitude",
title="Smoothed Power Spectral Density by DFT",
yscale="log"
)
ts_idx = 5
for ts, ts_type, color in zip(
[standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
["standard", "IHS"],
["tab:blue", "tab:red"],
):
dft_values = dft(ts)
# sampling rate
sr = 128
# sampling interval
# NOTE(review): sintv is computed but never used in this cell.
sintv = 1.0 / sr
# calculate the frequency
N = len(dft_values)
T = N / sr
freq = np.arange(N) / T
# keep only the first half of the spectrum (real signal => symmetric DFT)
N //= 2
dft_values = dft_values[:N] / N
freq = freq[:N]
psd = pd.Series(np.abs(dft_values))
fig1 = plot_stats(
psd, xs=freq, name=ts_type, marker="", color=color, alpha=0.5, **kwargs1
)
kwargs1 = dict(fig=fig1)
smoothed_psd = get_smoothed(get_smoothed(psd, weights=np.arange(4 * T + 1)), std=T)
fig2 = plot_ts(
smoothed_psd, index_values=freq, name=ts_type, color=color, **kwargs2
)
kwargs2 = dict(fig=fig2)
fig3 = plot_ts(
smoothed_psd, index_values=freq, name=ts_type, color=color, **kwargs3
)
kwargs3 = dict(fig=fig3)
display(fig1)
display(fig2)
display(fig3)
%%html
<!--hide_me_please-->
<p class="showcode" id="12345rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#12345rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="12345rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#12345rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# psd_fun_from_ar
def psd_fun_from_ar(ar_coeffs, ts, sampT):
    """
    Build the theoretical power-spectral-density function of an AR model.

    Fits nothing itself: given already-estimated AR coefficients, it
    measures the one-step-prediction residual variance on ``ts`` and
    returns a vectorized function ``f -> PSD(f)`` of the AR process:
    ``s^2 * sampT / |1 - sum_k a_k * exp(-2j*pi*f*sampT*k)|^2``.

    Parameters
    ----------
    ar_coeffs : array-like
        AR coefficients a_1..a_p.
    ts : pandas.Series
        Series used to estimate the residual variance (reads ``.values``).
    sampT : float
        Sampling interval (1 / sampling rate).

    Returns
    -------
    numpy.vectorize
        Callable mapping frequency (scalar or array) to PSD values.
    """
    order = len(ar_coeffs)
    signal = ts.values
    pred = np.zeros_like(signal)
    ar_one_step_pred(signal, pred, ar_coeffs)
    # variance of the one-step-ahead prediction residuals
    resid_var = np.std(signal - pred) ** 2
    lags = np.arange(1, order + 1)

    def psd_at(f):
        # characteristic polynomial of the AR process evaluated on the unit circle
        char_val = 1 - np.dot(np.exp(-2j * np.pi * f * sampT * lags), ar_coeffs)
        return resid_var * sampT / np.abs(char_val) ** 2

    return np.vectorize(psd_at)
# psd_from_ar_plots
# Three near-duplicate cells: plot the AR-model PSD for the standardized and
# IHS-normalized signal, once per PACF-estimation method. They differ only in
# the AR order p (50 vs 30) and in the method list (all three vs ld_biased).
p = 50
for pacf_method in [ols, ld_biased, burg]:
kwargs = dict(
xtitle="Freq (Hz)",
ytitle="Amplitude",
title=f"Power Spectral Density by {pacf_method.upper()} AR({p})",
)
for ts, ts_type, color in zip(
[standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
["standard", "IHS"],
["tab:blue", "tab:red"],
):
pacf_values = pacf(ts, method=pacf_method, nlags=p)
ar_cs, _ = ar_from_pacf(pacf_values, nlags=p)
sr = 128
psd = psd_fun_from_ar(ar_cs, ts, 1.0 / sr)
# evaluate the PSD on N points covering [0, sr/2)
N = 1000
freq = np.arange(N) / (2 * N) * sr
fig = plot_ts(
psd(freq),
# yscale="log",
index_values=freq,
name=ts_type,
color=color,
**kwargs,
)
# rebind kwargs so the second series is drawn into the same figure
kwargs = dict(fig=fig)
display(fig)
# psd_from_ar_plots
p = 30
for pacf_method in [ols, ld_biased, burg]:
kwargs = dict(
xtitle="Freq (Hz)",
ytitle="Amplitude",
title=f"Power Spectral Density by {pacf_method.upper()} AR({p})",
)
for ts, ts_type, color in zip(
[standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
["standard", "IHS"],
["tab:blue", "tab:red"],
):
pacf_values = pacf(ts, method=pacf_method, nlags=p)
ar_cs, _ = ar_from_pacf(pacf_values, nlags=p)
sr = 128
psd = psd_fun_from_ar(ar_cs, ts, 1.0 / sr)
N = 1000
freq = np.arange(N) / (2 * N) * sr
fig = plot_ts(
psd(freq),
# yscale="log",
index_values=freq,
name=ts_type,
color=color,
**kwargs,
)
kwargs = dict(fig=fig)
display(fig)
# psd_from_ar_plots
p = 30
for pacf_method in [ld_biased]:
kwargs = dict(
xtitle="Freq (Hz)",
ytitle="Amplitude",
title=f"Power Spectral Density by {pacf_method.upper()} AR({p})",
)
for ts, ts_type, color in zip(
[standard_trans_ts[ts_idx], ihs_trans_ts[ts_idx]],
["standard", "IHS"],
["tab:blue", "tab:red"],
):
pacf_values = pacf(ts, method=pacf_method, nlags=p)
ar_cs, _ = ar_from_pacf(pacf_values, nlags=p)
sr = 128
psd = psd_fun_from_ar(ar_cs, ts, 1.0 / sr)
N = 1000
freq = np.arange(N) / (2 * N) * sr
fig = plot_ts(
psd(freq),
# yscale="log",
index_values=freq,
name=ts_type,
color=color,
**kwargs,
)
kwargs = dict(fig=fig)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="123456rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#123456rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="123456rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#123456rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# def ar_psd_means_and_stds
def ar_psd_means_and_stds(
    ts,
    begins,
    ends,
    sr,
    freqs,
    method,
    p=20,
    relative=False,
    relative_to_abs_sum=False,
    relative_to_mean_area=False,
    unit_area=True,
):
    """
    Mean and standard deviation of AR(p)-based PSD estimates over several
    sub-intervals of one series.

    For each interval ``[begin, end)`` an AR(p) model is fitted on
    ``ts[begin:end]`` with ``method`` and its theoretical PSD is evaluated
    at ``freqs`` (sampling rate ``sr``).

    Parameters
    ----------
    ts : pandas.Series
        Series to sample intervals from.
    begins, ends : sequences of int
        Matching interval boundaries (must have equal length).
    sr : float
        Sampling rate in Hz.
    freqs : np.ndarray
        Frequencies at which to evaluate each PSD.
    method : project PACF/AR estimation method (e.g. ld_biased, burg, ols).
    p : int
        AR order.
    relative : bool
        Divide the std by the mean (per frequency).
    relative_to_abs_sum : bool
        Divide the std by the per-frequency sum of |PSD| over intervals.
    relative_to_mean_area : bool
        Divide the std by the total area of the mean PSD.
    unit_area : bool
        Normalize each interval's PSD to unit area before aggregating.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Per-frequency mean and (possibly normalized) std over intervals.
    """
    n = len(begins)
    assert len(ends) == n
    # Fix: size the matrix from len(freqs) instead of a notebook-global N.
    # The old code only worked because every calling cell happened to set
    # N == len(freqs) beforehand; any other caller crashed or mis-sized.
    psd_values = np.zeros((n, len(freqs)), dtype=float)
    for i, (begin, end) in enumerate(zip(begins, ends)):
        ar_coeffs_values = ar_coeffs(ts[begin:end], method=method, p=p)
        psd_values[i, :] = psd_fun_from_ar(ar_coeffs_values, ts, 1.0 / sr)(freqs)
        if unit_area:
            psd_values[i, :] /= np.sum(psd_values[i, :])
    mu = np.mean(psd_values, 0)
    std = np.std(psd_values, 0)
    if relative:
        std /= mu
    if relative_to_abs_sum:
        std /= np.sum(np.abs(psd_values), 0)
    if relative_to_mean_area:
        std /= np.sum(mu)
    return mu, std
%%html
<!--hide_me_please-->
<p id="gfd1234765dfghnbrtyaa"></p>
<p class="hidecode" id="gfd1234765dfghnbrty"><a href="javascript:code_toggle('#gfd1234765dfghnbrtyaa')">hide codes in the notebook</a></p>
# Mean PSD by AR(p) With Standard Deviations
# For each method, fit AR(50) PSDs on overlapping 2000-sample windows of one
# EEG signal and plot the per-frequency mean with shaded std bands, once on a
# linear scale (std only) and once on a log scale (mean +/- std).
for method in [ld_biased, burg]:
ts_idx = 5
p = 50
ts_len = 2000
N = ts_len // 2
sr = 128
freqs = np.arange(N) / (2 * N) * sr
step = 750
begin = 0
# overlapping windows [k*step, k*step + ts_len) within the first 8000 samples
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mu_standard, std_standard = ar_psd_means_and_stds(
standard_trans_ts[ts_idx],
begins,
ends,
method=method,
p=p,
sr=sr,
freqs=freqs,
unit_area=True,
relative_to_mean_area=True,
)
mu_ihs, std_ihs = ar_psd_means_and_stds(
ihs_trans_ts[ts_idx],
begins,
ends,
method=method,
p=p,
sr=sr,
freqs=freqs,
unit_area=True,
relative_to_mean_area=True,
)
fig = plot_ts(
mu_standard,
index_values=freqs,
name="standard",
title=f"Mean PSD by AR({p}) With Standard Deviations",
subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
xtitle="Freq (Hz)",
ytitle="Amplitude",
showgrid=True,
)
ax = fig.get_axes()[0]
# shaded band: std of the standardized signal's PSD
ax.fill_between(
freqs,
std_standard,
np.zeros_like(std_standard),
alpha=0.25,
color="tab:blue",
)
fig = plot_ts(mu_ihs, index_values=freqs, color="tab:red", name="IHS", fig=fig)
ax.fill_between(
freqs,
std_ihs,
np.zeros_like(std_ihs),
alpha=0.25,
color="tab:red",
)
display(fig)
# second figure: same data, log-scale y, band drawn as mean +/- std
fig = plot_ts(
mu_standard,
index_values=freqs,
name="standard",
title=f"Mean PSD by AR({p}) With Standard Deviations",
subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
xtitle="Freq (Hz)",
ytitle="Amplitude",
showgrid=True,
yscale="log",
)
ax = fig.get_axes()[0]
ax.fill_between(
freqs,
mu_standard + std_standard,
mu_standard - std_standard,
alpha=0.25,
color="tab:blue",
)
fig = plot_ts(mu_ihs, index_values=freqs, color="tab:red", name="IHS", fig=fig)
ax.fill_between(
freqs,
mu_ihs + std_ihs,
mu_ihs - std_ihs,
alpha=0.25,
color="tab:red",
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="12345678rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#12345678rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="12345678rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#12345678rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# mean standard deviation of nth coefficients AR(p)
# Setup shared by the cells below: AR orders to sweep, window geometry
# (overlapping 2000-sample windows, step 750, within the first 8000 samples)
# and the frequency grid.
ps = np.array([5, 10, 15, 20, 25, 30, 35, 40, 50, 60, 70, 80, 90, 100])
ts_len = 2000
N = ts_len // 2
sr = 128
freqs = np.arange(N) / (2 * N) * sr
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
def mean_stds(tss, freqs, method, ps):
    """
    For each AR order in ``ps``, average over the series in ``tss`` the
    total (summed over ``freqs``) interval-to-interval std of unit-area
    AR-PSD estimates.

    Relies on the notebook globals ``begins``, ``ends`` and ``sr``.
    Returns an array aligned with ``ps``.
    """
    totals = np.zeros(len(ps))
    for order_idx, order in enumerate(ps):
        for series in tss:
            _, std = ar_psd_means_and_stds(
                series, begins, ends, sr, freqs, method, p=order, unit_area=True
            )
            totals[order_idx] += np.sum(std)
    return totals / len(tss)
# Compute the mean PSD-std curves for all three methods (slow), then plot them
# twice: once with all methods overlaid, once with only ld_biased.
mean_std_psd_standard_lst = [mean_stds(standard_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased, burg, ols]]
mean_std_psd_ihs_lst = [mean_stds(ihs_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased, burg, ols]]
# mean_std_psd_standard_lst = [
#     mean_stds(standard_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased]
# ]
# mean_std_psd_ihs_lst = [
#     mean_stds(ihs_trans_ts, freqs, method=method, ps=ps) for method in [ld_biased]
# ]
kwargs = dict(
title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="p (order of AR)",
xmargin=0.02,
calc_xticks=True,
# yscale="log",
)
# one curve pair (standardized vs IHS) per method, distinguished by linestyle
for method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
[ld_biased, burg, ols],
[("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
zip(mean_std_psd_standard_lst, mean_std_psd_ihs_lst),
):
fig = plot_ts(
mean_std_standard,
index_values=ps,
name=f"{pacf_methods_names[method]} method on the standardized",
color="darkblue",
linestyle=linestyle,
linewidth=linewidth,
marker="o",
height=800,
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
index_values=ps,
name=f"{pacf_methods_names[method]} method on the IHS-normalized",
color="tab:orange",
linestyle=linestyle,
linewidth=linewidth,
marker="o",
**kwargs,
)
display(fig)
# Same plot restricted to the ld_biased method only (zip truncates the lists).
kwargs = dict(
title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="p (order of AR)",
xmargin=0.02,
calc_xticks=True,
# yscale="log",
)
for method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
[ld_biased], [("-", 1.5)], zip(mean_std_psd_standard_lst, mean_std_psd_ihs_lst)
):
# [ld_biased, burg, ols],
# [("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
# zip(mean_std_standard_lst, mean_std_ihs_lst)):
fig = plot_ts(
mean_std_standard,
index_values=ps,
name=f"{pacf_methods_names[method]} method on the standardized",
color="darkblue",
linestyle=linestyle,
linewidth=linewidth,
marker="o",
height=800,
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
index_values=ps,
name=f"{pacf_methods_names[method]} method on the IHS-normalized",
color="tab:orange",
linestyle=linestyle,
linewidth=linewidth,
marker="o",
**kwargs,
)
display(fig)
%%html
<!--hide_me_please-->
<p class="showcode" id="zxc12345678rfdqwdfnmjhtujkertgfaa"><a href="javascript:code_toggle('#zxc12345678rfdqwdfnmjhtujkertgf')">show codes in the notebook</a></p>
<p class="hidecode" id="zxc12345678rfdqwdfnmjhtujkertgf"><a href="javascript:code_toggle('#zxc12345678rfdqwdfnmjhtujkertgfaa')">hide codes in the notebook</a></p>
# mean standard deviation of nth coefficients AR(p)
# Variant of the earlier sweep: larger AR orders (up to 200) and a fixed
# 256-point frequency grid instead of ts_len // 2.
ps = np.array(
[
5,
10,
15,
20,
25,
30,
35,
40,
50,
60,
80,
100,
150,
200,
]
)
ts_len = 2000
N = 256
sr = 128
freqs = np.arange(N) / (2 * N) * sr
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
def mean_stds(tss, freqs, method, ps):
    """
    Redefinition of the earlier ``mean_stds`` (shadows it): same sweep over
    AR orders, but the per-interval stds are normalized relative to the
    mean-PSD area (``relative_to_mean_area=True``) instead of forcing each
    interval's PSD to unit area.

    Uses the notebook globals ``begins``, ``ends`` and ``sr``.
    """
    totals = np.zeros(len(ps))
    for order_idx, order in enumerate(ps):
        for series in tss:
            _, std = ar_psd_means_and_stds(
                series, begins, ends, sr, freqs, method,
                p=order, relative_to_mean_area=True,
            )
            totals[order_idx] += np.sum(std)
    return totals / len(tss)
# Plot the area-relative PSD-std sweep: one color per method, solid for the
# standardized and dashed for the IHS-normalized transform.
kwargs = dict(
title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="p (order of AR)",
# yscale="log",
)
for method, color in zip([ld_biased, burg, ols], ["tab:blue", "darkred", "tab:orange"]):
mean_std_standard = mean_stds(standard_trans_ts, freqs, method=method, ps=ps)
mean_std_ihs = mean_stds(ihs_trans_ts, freqs, method=method, ps=ps)
fig = plot_ts(
mean_std_standard,
index_values=ps,
name=f"{method} method on standard",
color=color,
linestyle="-",
marker="o",
height=800,
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
index_values=ps,
name=f"{method} method on IHS",
color=color,
linestyle="--",
marker="o",
**kwargs,
)
display(fig)
# def plot_diverse_intervals_experiments
def plot_pacf_for_intervals(
    ts, ts_type, begins, ends, ps, pacf_method, alpha=0.05, nlags=None, **kwargs
):
    """
    Overlay PACF plots for several intervals of ``ts`` (green shade encodes
    the interval's position via the matching value in ``ps``).

    Parameters
    ----------
    ts : pandas.Series
        Series to slice intervals from.
    ts_type : str
        Label of the transform (currently unused here; kept for the caller).
    begins, ends : sequences of int
        Matching interval boundaries.
    ps : sequence of float
        Values in [0, 1] shading each interval's curve color.
    pacf_method : project PACF estimation method.
    alpha : float
        Confidence level passed to ``plot_pacf``.
    nlags : int or None
        Number of PACF lags.
    **kwargs
        Forwarded to ``plot_pacf`` (e.g. ``ax=...``).

    Returns
    -------
    The figure returned by the last ``plot_pacf`` call, or None.
    """
    # Fix: ``fig`` was never assigned in this function, so the final
    # ``if fig is not None: return fig`` read a stale notebook-global figure
    # (or raised NameError). Initialize it and capture plot_pacf's result.
    fig = None
    for begin, end, p in zip(begins, ends, ps):
        color = (0, 1 - p * 0.5, 0)
        # NOTE(review): plot_pacf may return None when drawing into a given
        # ax — keep the last non-None result; confirm against its signature.
        result = plot_pacf(
            ts[begin:end],
            alpha=alpha,
            nlags=nlags,
            method=pacf_method,
            # label=f"[{begin},{end-1}]",
            color=color,
            conf_alpha=0.02,
            **kwargs,
        )
        if result is not None:
            fig = result
    if fig is not None:
        return fig
def plot_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    intervals_type,
    pacf_methods,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    """
    For each PACF method, draw one figure with a vertical subplot per series
    in ``ts_list`` and overlay the PACFs of all the given intervals.

    Parameters
    ----------
    begins, ends : sequences of int
        Matching interval boundaries.
    ps : sequence of float
        Color shades for the intervals (forwarded to plot_pacf_for_intervals).
    intervals_type : str
        Description of the interval scheme.
        NOTE(review): currently unused in the body — likely meant for a title.
    pacf_methods : iterable of project PACF methods.
    ts_list : list of (series, label) pairs, one subplot each.
    title, subtitle : str or None
        NOTE(review): accepted but not used; fig_with_vertical_subplots is
        called with title=None regardless. Confirm whether this is intended.
    **kwargs
        Forwarded to ``plot_pacf_for_intervals`` (e.g. ``nlags``).
    """
    for pacf_method in pacf_methods:
        # NOTE(review): method_descr is computed but unused in this body.
        method_descr = methods_descrs[pacf_method]
        fig, axs = fig_with_vertical_subplots(
            len(ts_list),
            title=None,
            # subplots_titles=True,
            ax_height=200,
            fontsize=15,
        )
        for (ts, ts_type), ax in zip(ts_list, axs):
            # fixed y-range so PACF plots are visually comparable
            ax.set_ylim(bottom=-0.7, top=1.05)
            plot_pacf_for_intervals(
                ts,
                ts_type,
                begins,
                ends,
                ps,
                pacf_method,
                plot_params={},
                ax=ax,
                **kwargs,
            )
        display(fig)
# ld-biased pacf experiments on highly overlapping intervals
# Four experiment cells: the first two use 10 intervals shifted by 1 sample
# each (near-total overlap), the last two use intervals stepped by 750
# samples; each pair runs once on the standardized and once on the
# IHS-normalized signal. ps maps each interval's end to [0, 1] for shading.
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ld_biased, ols, burg],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
],
nlags=nlags,
)
# ld-biased pacf experiments on highly overlapping intervals
ts_idx = 5
ts_len = 2000
begin = 4000
end = begin + ts_len
ends = list(np.arange(end, end + 10, 1))
begins = [begin + n for n in range(10)]
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ols],
ts_list=[
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
# ld-biased pacf experiments on highly overlapping intervals
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ld_biased, ols, burg],
ts_list=[
(standard_trans_ts[ts_idx], f"Standard normalized (idx={ts_idx})"),
],
nlags=nlags,
)
# ld-biased pacf experiments on highly overlapping intervals
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
nlags = 100
plot_diverse_intervals_experiments(
begins,
ends,
ps,
f"[n,n+{ts_len})",
[ols],
ts_list=[
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
nlags=nlags,
)
# mean std of nth lag of pacf
# Window geometry for the PACF-stability study below.
nlags = 100
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
def mean_std(tss, pacf_method):
    """
    Average over the series in ``tss`` the per-lag std of PACF estimates
    computed across the notebook-global ``begins``/``ends`` intervals.

    Uses the globals ``nlags``, ``begins``, ``ends``. Returns an array of
    length ``nlags + 1`` (lag 0 included).
    """
    accumulated = np.zeros(nlags + 1)
    for series in tss:
        _, lag_stds, _ = pacf_means_and_stds(
            series, begins, ends, pacf_method=pacf_method, nlags=nlags
        )
        accumulated = accumulated + lag_stds
    return accumulated / len(tss)
# Plot smoothed per-lag PACF stds (one linestyle per method).
# NOTE(review): mean_std_standard_lst / mean_std_ihs_lst are not defined in
# this chunk — presumably computed in an earlier (possibly removed) cell via
# mean_std(); verify against the full notebook.
kwargs = dict(
xtitle="lag",
height=500,
)
for pacf_method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
[ld_biased, burg, ols],
[("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
zip(mean_std_standard_lst, mean_std_ihs_lst),
):
fig = plot_ts(
get_smoothed(pd.Series(mean_std_standard), std=2, only_prevs=False),
color="darkblue",
linestyle=linestyle,
linewidth=linewidth,
# alpha=0.5,
# marker="o",
name=f"{pacf_methods_names[pacf_method]} method for standardized",
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
get_smoothed(pd.Series(mean_std_ihs), std=2, only_prevs=False),
color="tab:orange",
linestyle=linestyle,
linewidth=linewidth,
# alpha=0.5,
# marker="o",
name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
**kwargs,
)
display(fig)
# def plot_ar_diverse_intervals_experiments
def plot_ar_for_intervals(ts, ts_type, begins, ends, ps, method, p, **kwargs):
    """
    Overlay the AR(p) coefficient estimates of several intervals of ``ts``
    on one figure; the matching value in ``ps`` (in [0, 1]) shades each
    curve's green color.

    Returns the accumulated figure, or None when there were no intervals.
    """
    fig = None
    kwargs["calc_xticks"] = True  # ticks are computed for the first curve only
    kwargs["major_xstep"] = 1
    for begin, end, shade in zip(begins, ends, ps):
        curve_color = (0, 1 - shade * 0.5, 0)
        estimates = ar_coeffs(ts[begin:end], p, method)
        fig = plot_stats(
            estimates,
            xs=np.arange(1, p + 1),
            # label=f"on [{begin},{end-1}]",
            color=curve_color,
            # showgrid=True,
            fig=fig,
            **kwargs,
        )
        # later curves reuse the already-configured axis
        kwargs.pop("calc_xticks", None)
    if fig is not None:
        return fig
def plot_ar_diverse_intervals_experiments(
    begins,
    ends,
    ps,
    methods,
    p,
    ts_list,
    title=None,
    subtitle=None,
    **kwargs,
):
    """
    For every estimation method and every (series, label) pair in ``ts_list``,
    display a figure overlaying the AR(p) coefficients fitted on each of the
    given intervals.

    NOTE(review): ``title``/``subtitle`` (and the derived const_title /
    const_subtitle flags) and ``method_descr`` are computed but never used
    in this body — possibly leftovers from an earlier version; confirm.
    """
    const_title = title is not None
    const_subtitle = subtitle is not None
    for method in methods:
        method_descr = methods_descrs[method]
        for ts, ts_type in ts_list:
            fig = plot_ar_for_intervals(
                ts,
                ts_type,
                begins,
                ends,
                ps,
                method,
                p,
                **kwargs,
            )
            display(fig)
# ld-biased ar experiments
# Overlay AR(20) coefficients fitted on overlapping 2000-sample windows for
# both transforms and all three methods.
ts_idx = 5
ts_len = 2000
begin = 0
end = begin + ts_len
ends = list(np.arange(end, end + 8001, 750))
begins = list(np.arange(begin, begin + 6001, 750))
ps = [(end - ends[0]) / (ends[-1] - ends[0]) for end in ends]
plot_ar_diverse_intervals_experiments(
begins,
ends,
ps,
methods=[ld_biased, ols, burg],
p=20,
ts_list=[
(standard_trans_ts[ts_idx], f"Standard (idx={ts_idx})"),
(ihs_trans_ts[ts_idx], f"IHS-normalized (idx={ts_idx})"),
],
title="",
ax_height=200,
)
# mean standard deviation of nth coefficients AR(p)
# Window geometry for the coefficient-stability study below.
p = 20
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_std(tss, method):
    """
    Redefinition of the earlier ``mean_std`` (shadows it): average over the
    series in ``tss`` the absolute relative std of each AR(p) coefficient
    across the notebook-global ``begins``/``ends`` intervals.

    Uses the globals ``p``, ``begins``, ``ends``. Returns an array of length p.
    """
    accumulated = np.zeros(p)
    for series in tss:
        _, rel_std = ar_means_and_stds(
            series, begins, ends, method=method, p=p, relative=True, margin="std"
        )
        accumulated = accumulated + np.abs(rel_std)
    return accumulated / len(tss)
# Scatter the per-coefficient relative stds for the ld_biased method,
# standardized vs IHS-normalized.
kwargs = dict(
# title=f"Mean Relative Standard Deviation of the n-th AR({p}) Coefficient",
# subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="k",
ax_height=300,
calc_xticks=True,
# yscale="log",
xmargin=0.02,
)
pacf_method = ld_biased
mean_std_standard = mean_std(standard_trans_ts, method=pacf_method)
mean_std_ihs = mean_std(ihs_trans_ts, method=pacf_method)
index_values = np.arange(1, p + 1)
fig = plot_ts(
mean_std_standard,
index_values=index_values,
color="darkblue",
linestyle="",
marker="o",
name=f"{pacf_methods_names[pacf_method]} method for standardized",
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
index_values=index_values,
color="tab:orange",
linestyle="",
marker="o",
name=f"{pacf_methods_names[pacf_method]} method for IHS-normalized",
**kwargs,
)
# ax = fig.get_axes()
# ax.set_y
display(fig)
# Mean PSD by AR(p) With Standard Deviations
# Un-normalized variant restricted to the 3-12 Hz band, plotted with std
# shading for every method.
for method in [ld_biased, ols, burg]:
ts_idx = 5
p = 50
ts_len = 2000
N = ts_len // 2
sr = 128
freqs = np.arange(N) / (2 * N) * sr
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
mu_standard, std_standard = ar_psd_means_and_stds(
standard_trans_ts[ts_idx],
begins,
ends,
method=method,
p=p,
sr=sr,
freqs=freqs,
unit_area=False,
relative_to_mean_area=False,
)
mu_ihs, std_ihs = ar_psd_means_and_stds(
ihs_trans_ts[ts_idx],
begins,
ends,
method=method,
p=p,
sr=sr,
freqs=freqs,
unit_area=False,
relative_to_mean_area=False,
)
# restrict all curves to the 3-12 Hz band
index = np.where((freqs >= 3) & (freqs <= 12))
fig = plot_ts(
mu_standard,
index=index,
index_values=freqs[index],
name="standardized",
# title=f"Mean PSD by AR({p}) With Standard Deviations",
# subtitle=f"{method} method for samples ({len(begins)} of length {ts_len}) from the same EEG signal (ts[{ts_idx}])",
xtitle="Freq (Hz)",
ytitle="Amplitude",
color="darkblue",
# calc_xticks=True,
ax_height=300,
showgrid=True,
)
ax = fig.get_axes()[0]
ax.fill_between(
freqs[index],
std_standard[index],
np.zeros_like(std_standard[index]),
alpha=0.1,
color="darkblue",
)
fig = plot_ts(
mu_ihs[index],
index_values=freqs[index],
color="tab:orange",
name="IHS-normalized",
fig=fig,
)
ax.fill_between(
freqs[index],
std_ihs[index],
np.zeros_like(std_ihs[index]),
alpha=0.25,
color="tab:orange",
)
ax.set_ylim(top=0.5)
display(fig)
# bare expression: notebook cell echoing the selected frequency band
freqs[index]
# Re-plot the earlier PSD-std sweep (mean_std_psd_*_lst from the cells above)
# with a smaller figure height and no titles.
kwargs = dict(
# title=f"Mean Area-Relative Standard Deviation of PSD by AR(p)",
# subtitle=f"Mean of the {len(eeg_ts)} STDs of groups consisting of {len(begins)} Intervals.\nEach of {len(begins)} Intervals is computed for samples (of length {ts_len}) from the same EEG signal.",
xtitle="p (the order of the AR model)",
xmargin=0.02,
calc_xticks=True,
# yscale="log",
)
for method, (linestyle, linewidth), (mean_std_standard, mean_std_ihs) in zip(
[ld_biased, burg, ols],
[("-", 1.5), ((0, (6, 7)), 1.7), ((0, (1, 3)), 3)],
zip(mean_std_psd_standard_lst, mean_std_psd_ihs_lst),
):
fig = plot_ts(
mean_std_standard,
index_values=ps,
name=f"{pacf_methods_names[method]} method on the standardized",
color="darkblue",
linestyle=linestyle,
linewidth=linewidth,
marker="o",
height=550,
**kwargs,
)
kwargs = dict(fig=fig)
fig = plot_ts(
mean_std_ihs,
index_values=ps,
name=f"{pacf_methods_names[method]} method on the IHS-normalized",
color="tab:orange",
linestyle=linestyle,
linewidth=linewidth,
marker="o",
**kwargs,
)
display(fig)
# Q-Q plot: EEG data versus a normal distribution
# Sorted sample quantiles of both transforms plotted against theoretical
# normal quantiles; the black diagonal is the identity reference.
# NOTE(review): np.linspace(0.0, 1.0, n) includes the endpoints, so
# norm.ppf yields -inf/+inf for the first/last theoretical quantile —
# those two points cannot render; confirm whether the endpoints should be
# excluded (e.g. via a plotting-position formula like (i+0.5)/n).
n = 7000
begin = 1000
ts_idx = 5
normal_samples = np.apply_along_axis(norm.ppf, 0, np.linspace(0.0, 1.0, n))
normal_q = pd.Series(normal_samples, index=normal_samples)
standard_q = pd.Series(
np.sort(standard_trans_ts[ts_idx][begin : begin + n]),
index=normal_samples,
)
ihs_q = pd.Series(
np.sort(ihs_trans_ts[ts_idx][begin : begin + n]), index=normal_samples
)
fig = plot_ts(
normal_q,
color="black",
xtitle="normal theoretical quantiles",
ytitle="data quantiles",
# title="Quantile-Quantile Plot for EEG Data",
# subtitle="EEG data versus a normal distribution",
width=500,
height=500,
)
markersize = 5
plot_ts(
standard_q,
color="darkblue",
marker="o",
markersize=markersize,
mfc="none",
linestyle="None",
alpha=0.5,
fig=fig,
name="standardized",
)
plot_ts(
ihs_q,
color="tab:orange",
marker="o",
markersize=markersize,
mfc="none",
linestyle="None",
alpha=0.5,
fig=fig,
name="IHS-normalized",
)
display(fig)
# means_and_stds_of_1-norm_and_2-norm
# Window geometry for the autocorrelation-matrix norm study below.
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
    """
    For each matrix size h = 1..nlags, build the (normalized)
    autocovariance matrix R_h on every notebook-global interval of every
    series in ``tss``, take its p-norm (optionally of the inverse), and
    return (mean over series of the per-series interval means,
    std over series of those means), both as Series indexed 1..nlags.

    NOTE(review): ``std_norms`` (per-series std over intervals) is computed
    but never used — the returned second value is the std *across series*
    of the per-series means; the commented-out return below suggests the
    per-interval std may have been intended. Confirm against the analysis text.
    Uses the globals ``nlags``, ``begins``, ``ends``.
    """
    # tss = tss[0:2]
    mean_norms = np.zeros((len(tss), nlags + 1))
    std_norms = np.zeros((len(tss), nlags + 1))
    for i, ts in enumerate(tss):
        for lags in range(1, nlags + 1):
            norms = []
            for begin, end in zip(begins, ends):
                # autocovariance truncated to `lags` terms, normalized to autocorrelation
                acov = acovf(ts[begin:end], fft=False)[:lags]
                acov /= acov[0]
                mcov = cov_matrix(acov)
                if inv:
                    mcov = np.linalg.inv(mcov)
                norms.append(np.linalg.norm(mcov, p))
            mean_norms[i][lags] = np.mean(norms)
            std_norms[i][lags] = np.std(norms)
    return (
        pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
        pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
    )
    # return (
    #     pd.Series(np.around(np.mean(mean_norms, 0)[1:], 1), index=range(1, nlags + 1)),
    #     pd.Series(np.around(np.mean(std_norm, 0), 1)[1:], index=range(1, nlags + 1)),
    # )
# Plot the mean 2-norm of the autocorrelation matrix vs matrix size for both
# transforms, with +/- std bands, on a log scale.
p = 2
kwargs = dict(
xtitle="$h$ (matrix size)",
ax_height=300,
calc_xticks=True,
)
fig = None
for ts, ts_name, color, conf_alpha, inv in [
(standard_trans_ts, "$||\hat{R}_h||_2$ for standardized", "darkblue", 0.1, False),
(ihs_trans_ts, "$||\hat{R}_h||_2$ for IHS-normalized", "tab:orange", 0.25, False),
]:
mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
# std_bar = np.transpose(
#     np.array([np.around(mean_norms - std_norms, 1), mean_norms + std_norms])
# )
std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
fig = plot_stats(
mean_norms.values,
xs=mean_norms.index.values,
conf_intvs=std_bar,
fill_along_axis=False,
fill_only_positive=False,
label=ts_name,
color=color,
conf_alpha=conf_alpha,
yscale="log",
**kwargs,
)
kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
# means_and_stds_of_1-norm_and_2-norm
# Near-duplicate of the previous two cells, but plotting the norm of the
# INVERSE autocorrelation matrix (inv=True). The function redefinition below
# is byte-identical to the earlier one.
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
print(f"Intervals: {[[begin, end-1] for begin, end in zip(begins, ends)]}")
def mean_of_q_norm_means_and_stds(tss, p=1, inv=False):
# tss = tss[0:2]
mean_norms = np.zeros((len(tss), nlags + 1))
std_norms = np.zeros((len(tss), nlags + 1))
for i, ts in enumerate(tss):
for lags in range(1, nlags + 1):
norms = []
for begin, end in zip(begins, ends):
acov = acovf(ts[begin:end], fft=False)[:lags]
acov /= acov[0]
mcov = cov_matrix(acov)
if inv:
mcov = np.linalg.inv(mcov)
norms.append(np.linalg.norm(mcov, p))
mean_norms[i][lags] = np.mean(norms)
std_norms[i][lags] = np.std(norms)
return (
pd.Series(np.mean(mean_norms, 0)[1:], index=range(1, nlags + 1)),
pd.Series(np.std(mean_norms, 0)[1:], index=range(1, nlags + 1)),
)
# return (
#     pd.Series(np.around(np.mean(mean_norms, 0)[1:], 1), index=range(1, nlags + 1)),
#     pd.Series(np.around(np.mean(std_norm, 0), 1)[1:], index=range(1, nlags + 1)),
# )
p = 2
kwargs = dict(
xtitle="$h$ (matrix size)",
ax_height=300,
calc_xticks=True,
)
fig = None
for ts, ts_name, color, conf_alpha, inv in [
(standard_trans_ts, "$||\hat{R}_h^{-1}||_2$ for standardized", "darkblue", 0.1, True),
(ihs_trans_ts, "$||\hat{R}_h^{-1}||_2$ for IHS-normalized", "tab:orange", 0.25, True),
]:
mean_norms, std_norms = mean_of_q_norm_means_and_stds(ts, inv=inv, p=p)
# std_bar = np.transpose(
#     np.array([np.around(mean_norms - std_norms, 1), mean_norms + std_norms])
# )
std_bar = np.transpose(np.array([mean_norms - std_norms, mean_norms + std_norms]))
fig = plot_stats(
mean_norms.values,
xs=mean_norms.index.values,
conf_intvs=std_bar,
fill_along_axis=False,
fill_only_positive=False,
label=ts_name,
color=color,
conf_alpha=conf_alpha,
yscale="log",
**kwargs,
)
kwargs = dict(fig=fig)
ax_settings(fig=fig, showgrid=True)
display(fig)
# means_and_stds_autocorr
def means_and_stds_autocorr(
    begins, ends, title=None, subtitle=None, nlags=20, alpha=0.05
):
    """
    Display the mean autocorrelation (with mean-std bands) over the given
    intervals, averaged over ALL signals of each transform, plus the
    +/- z_{1-alpha/2} / sqrt(ts_len) white-noise confidence lines.

    Reads the notebook globals standard_trans_ts / ihs_trans_ts; each is a
    collection of series. Autocovariances are normalized to autocorrelations
    by dividing by lag 0.
    """
    print(f"Intervals: {[[begin, end] for begin, end in zip(begins, ends)]}")
    ts_len = ends[0] - begins[0]
    kwargs = dict(title=title, subtitle=subtitle, ax_height=300)
    fig = None
    for ts, ts_name, color in [
        (standard_trans_ts, "standardized", "darkblue"),
        (ihs_trans_ts, "IHS-normalized", "tab:orange"),
    ]:
        means = np.zeros((len(ts), nlags + 1))
        stds = np.zeros((len(ts), nlags + 1))
        for ts_idx in range(len(ts)):
            autocorrs = np.zeros((len(begins), nlags + 1))
            for i, (begin, end) in enumerate(zip(begins, ends)):
                autocorrs[i, :] = acovf(ts[ts_idx][begin:end], fft=False)[: nlags + 1]
                autocorrs[i, :] /= autocorrs[i, 0]
            means[ts_idx, :] = np.mean(autocorrs, 0)
            stds[ts_idx, :] = np.std(autocorrs, 0)
        # aggregate over signals
        mean_mean = np.mean(means, 0)
        mean_std = np.mean(stds, 0)
        fig = plot_stats(
            mean_mean,
            std=mean_std,
            conf_alpha=0.1,
            xs=np.arange(0, nlags + 1),
            label=ts_name,
            color=color,
            markersize=7,
            showgrid=True,
            **kwargs,
        )
        kwargs = dict(fig=fig)
    # white-noise confidence bounds for a series of length ts_len
    conf_value = norm.ppf(1 - alpha / 2.0) * np.sqrt(1.0 / ts_len)
    plot_ts(
        pd.Series([conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        name=f"{100 * (1-alpha)}% confidence interval",
        fig=fig,
    )
    plot_ts(
        pd.Series([-conf_value] * 2, index=[-0.5, nlags + 0.5]),
        linestyle="--",
        color="gray",
        fig=fig,
    )
    display(fig)
# Run the autocorrelation study on the standard overlapping-window geometry.
nlags = 60
ts_len = 2000
step = 750
begin = 0
begins = np.arange(begin, 8001 - ts_len, step)
ends = np.arange(begin + ts_len, 8001, step)
means_and_stds_autocorr(begins, ends, title=None, subtitle=None, nlags=nlags)
%%html
<!--hide_me_please-->
<style>
.rendered_html {
font-size: 110%;
}
</style>
%%html
<!--hide_me_please-->
<p class="showoutput_stderr" id="first123456aa_stderr"><a href="javascript:output_stderr_toggle('#first123456_stderr')">show stderr outputs in the notebook</a></p>
<p class="hideoutput_stderr" id="first123456_stderr"><a href="javascript:output_stderr_toggle('#first123456aa_stderr')">hide stderr outputs in the notebook</a></p>
<p class="showoutput_error" id="first123456aa_error"><a href="javascript:output_error_toggle('#first123456_error')">show errors in the notebook</a></p>
<p class="hideoutput_error" id="first123456_error"><a href="javascript:output_error_toggle('#first123456aa_error')">hide errors in the notebook</a></p>
<p class="showtemporary_output" id="first123456aa_temporary_output"><a href="javascript:temporary_output_toggle('#first123456_temporary_output')">show temporary outputs in the notebook</a></p>
<p class="hidetemporary_output" id="first123456_temporary_output"><a href="javascript:temporary_output_toggle('#first123456aa_temporary_output')">hide temporary outputs in the notebook</a></p>
<p class="showtemporary_input" id="first123456aa_temporary_input"><a href="javascript:temporary_input_toggle('#first123456_temporary_input')">show temporary inputs in the notebook</a></p>
<p class="hidetemporary_input" id="first123456_temporary_input"><a href="javascript:temporary_input_toggle('#first123456aa_temporary_input')">hide temporary inputs in the notebook</a></p>
<p class="showdev_codes" id="first123456aa_dev_codes"><a href="javascript:dev_codes_toggle('#first123456_dev_codes')">show dev codes in the notebook</a></p>
<p class="hidedev_codes" id="first123456_dev_codes"><a href="javascript:dev_codes_toggle('#first123456aa_dev_codes')">hide dev codes in the notebook</a></p>
%%html
<!--hide_me_please-->
<script>
// you can change the following default display settings
code_show=false; // ordinary code cells
dev_codes_show=false; // notebook javascript and html codes hidden
output_text_show=true; // stdout (excluding temporary outputs)
output_stderr_show=true; // stderr (excluding temporary outputs)
output_error_show=true; // python interpreter errors (excluding temporary outputs)
temporary_output_show=false; // temporary outputs (from temporary code cells)
temporary_input_show=false; // temporary code cells
output_report_view=true; // report view or work view
function code_toggle(target_id, change=true){
if (change) {
code_show=!code_show;
}
if(code_show) {
$('.input').show();
$('.code_cell').css('padding', '2px');
$('.code_cell').css('border-width', '1px');
$(".hidecode").show();
$(".showcode").hide();
} else {
$('.input').hide();
$('.code_cell').css('padding', '0px');
$('.code_cell').css('border-width', '0px');
$('.hidecode').hide();
$('.showcode').show();
}
output_text_toggle('', false);
output_stderr_toggle('', false);
output_error_toggle('', false);
temporary_output_toggle('', false);
temporary_input_toggle('', false);
dev_codes_toggle('', false)
if (target_id != "") {
$(document).ready(function() {
$(target_id).get(0).scrollIntoView();
})
}
}
function output_text_toggle(target_id, change=true){
if (change) {
output_text_show=!output_text_show;
}
if(output_text_show) {
$('.output_text').not(':contains("this_is_temporary_output")').show();
$(".hideoutput_text").show();
$(".showoutput_text").hide();
} else {
$('.output_text').not(':contains("this_is_temporary_output")').hide();
$('.hideoutput_text').hide();
$('.showoutput_text').show();
}
if (target_id != "") {
$(document).ready(function() {
$(target_id).get(0).scrollIntoView();
})
}
}
// Show or hide the notebook's own helper cells (those marked with the
// "hide_me_please" sentinel). Global flag: dev_codes_show.
function dev_codes_toggle(target_id, change=true){
    if (change) {
        dev_codes_show = !dev_codes_show;
    }
    const on = dev_codes_show;
    $('.output_javascript:contains("hide_me_please")').toggle(on);
    $('.input:contains("hide_me_please")').toggle(on);
    $('.prompt:contains("hide_me_please")').toggle(on);
    $('.code_cell:contains("hide_me_please")').css({
        'padding': on ? '2px' : '0px',
        'border-width': on ? '1px' : '0px'
    });
    $('.hidedev_codes').toggle(on);
    $('.showdev_codes').toggle(!on);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        });
    }
}
// Show or hide temporary outputs (cells/outputs marked with the
// "this_is_temporary_output" / "this_is_temporary_cell" sentinels).
// Global flag: temporary_output_show.
// target_id: element to scroll to afterwards ('' = no scroll);
// change: flip the flag first (false = just re-apply the current state).
function temporary_output_toggle(target_id, change=true){
    if (change) {
        temporary_output_show = !temporary_output_show;
    }
    if (temporary_output_show) {
        $('.output_javascript:contains("this_is_temporary_output")').show();
        $('.output_text:contains("this_is_temporary_output")').each(function(i, el) {
            el.style.visibility = "visible";
            // BUG FIX: `node` was an implicit global; declare it per callback.
            // Walk up to the output-area wrapper so the whole area reappears.
            const node = el.parentNode.parentNode.parentNode;
            node.style.display = "block";
            node.style.visibility = "visible";
        });
        $('.output_error').each(function(i, node) {
            // BUG FIX: `parent` was an implicit global; declare it per callback.
            const parent = node.parentNode.parentNode.parentNode;
            // BUG FIX: template literal was missing ${} placeholders and
            // logged the literal text "{node} in {parent}".
            console.log(`error ${node} in ${parent}`);
            console.log(parent.parentNode.innerText);
            if (parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("show");
                parent.style.display = "block";
                parent.style.visibility = "visible";
            }
        });
        $(".hidetemporary_output").show();
        $(".showtemporary_output").hide();
    } else {
        $('.output_javascript:contains("this_is_temporary_output")').hide();
        $('.output_text:contains("this_is_temporary_output")').each(function(i, el) {
            el.style.visibility = "hidden";
            const node = el.parentNode.parentNode.parentNode;
            node.style.display = "none";
            node.style.visibility = "hidden";
        });
        $('.output_error').each(function(i, node) {
            const parent = node.parentNode.parentNode.parentNode;
            console.log(`error ${node} in ${parent}`);
            if (parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("hide");
                parent.style.display = "none";
                parent.style.visibility = "hidden";
            }
        });
        $(".hidetemporary_output").hide();
        $(".showtemporary_output").show();
    }
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        });
    }
}
// Show or hide temporary code cells (marked "this_is_temporary_cell" but
// NOT "hide_me_please", which dev_codes_toggle owns).
// Global flag: temporary_input_show.
function temporary_input_toggle(target_id, change=true){
    if (change) {
        temporary_input_show = !temporary_input_show;
    }
    const on = temporary_input_show;
    // Factor out the selector chain repeated for every cell part.
    const pick = function(cls) {
        return $(cls + ':contains("this_is_temporary_cell")').not(':contains("hide_me_please")');
    };
    pick('.output_javascript').toggle(on);
    pick('.input').toggle(on);
    pick('.prompt').toggle(on);
    pick('.code_cell').css({
        'padding': on ? '2px' : '0px',
        'border-width': on ? '1px' : '0px'
    });
    $('.hidetemporary_input').toggle(on);
    $('.showtemporary_input').toggle(!on);
    output_text_toggle('', false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        });
    }
}
// Show or hide stderr outputs, skipping temporary ones.
// Global flag: output_stderr_show.
// target_id: element to scroll to afterwards ('' = no scroll);
// change: flip the flag first (false = just re-apply the current state).
function output_stderr_toggle(target_id, change=true){
    if (change) {
        output_stderr_show = !output_stderr_show;
    }
    if (output_stderr_show) {
        $('.output_stderr').each(function(i, node) {
            // BUG FIX: `parent` was an implicit global; declare it per callback.
            const parent = node.parentNode.parentNode.parentNode.parentNode;
            // BUG FIX: template literal was missing ${} placeholders.
            console.log(`stderr ${node} in ${parent}`);
            if (!parent.textContent.includes("this_is_temporary_output")) {
                console.log("show");
                node.parentNode.style.display = "";
                node.parentNode.style.visibility = "visible";
            }
        });
        $(".hideoutput_stderr").show();
        $(".showoutput_stderr").hide();
    } else {
        $('.output_stderr').each(function(i, node) {
            const parent = node.parentNode.parentNode.parentNode.parentNode;
            console.log(`stderr ${node} in ${parent}`);
            if (!parent.textContent.includes("this_is_temporary_output")) {
                console.log("hide");
                node.parentNode.style.display = "none";
                node.parentNode.style.visibility = "hidden";
            }
        });
        $('.hideoutput_stderr').hide();
        $('.showoutput_stderr').show();
    }
    temporary_output_toggle('', false);
    output_text_toggle('', false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        });
    }
}
// Show or hide python interpreter error outputs, skipping those from
// temporary cells. Global flag: output_error_show.
// target_id: element to scroll to afterwards ('' = no scroll);
// change: flip the flag first (false = just re-apply the current state).
function output_error_toggle(target_id, change=true){
    if (change) {
        output_error_show = !output_error_show;
    }
    if (output_error_show) {
        $('.output_error').each(function(i, node) {
            // BUG FIX: `parent` was an implicit global; declare it per callback.
            const parent = node.parentNode.parentNode.parentNode;
            // BUG FIX: template literal was missing ${} placeholders.
            console.log(`error ${node} in ${parent}`);
            console.log(parent.parentNode.innerText);
            if (!parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("show");
                parent.style.display = "block";
                parent.style.visibility = "visible";
            }
        });
        $(".hideoutput_error").show();
        $(".showoutput_error").hide();
    } else {
        $('.output_error').each(function(i, node) {
            const parent = node.parentNode.parentNode.parentNode;
            console.log(`error ${node} in ${parent}`);
            if (!parent.parentNode.textContent.includes("this_is_temporary_cell")) {
                console.log("hide");
                parent.style.display = "none";
                parent.style.visibility = "hidden";
            }
        });
        $('.hideoutput_error').hide();
        $('.showoutput_error').show();
    }
    temporary_input_toggle('', false);
    output_text_toggle('', false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        });
    }
}
// Switch between "report view" (code, prompts, stderr and errors hidden)
// and "work view" (everything visible). Global flag: output_report_view.
// target_id: element to scroll to afterwards ('' = no scroll);
// change: flip the flag first (false = just re-apply the current state).
// BUG FIX: calls below used `change=false` — a Python keyword-argument
// habit that in JavaScript is an assignment expression (it clobbered this
// function's own `change` parameter); plain `false` is passed instead.
function output_report_or_work_toggle(target_id, change=true) {
    if (change) {
        output_report_view = !output_report_view;
    }
    if (output_report_view) {
        code_show = false;
        code_toggle('', false);
        dev_codes_show = false;
        output_text_show = true;
        output_stderr_show = false;
        output_error_show = false;
        temporary_output_show = false;
        temporary_input_show = false;
        $('.prompt, .input, .prompt_container').hide();
        $('.out_prompt_overlay').hide();
        $('.code_cell').css('padding', '0px');
        $('.code_cell').css('border-width', '0px');
        $(".showoutput_work_view").show();
        $(".showoutput_report_view").hide();
    } else {
        code_show = true;
        code_toggle('', false);
        output_text_show = true;
        output_stderr_show = true;
        output_error_show = true;
        $('.input, .prompt, .prompt_container').show();
        $('.out_prompt_overlay').show();
        $('.code_cell').css('padding', '2px');
        $('.code_cell').css('border-width', '1px');
        $('.showoutput_work_view').hide();
        $('.showoutput_report_view').show();
    }
    // Re-apply every dependent setting with the flags chosen above.
    dev_codes_toggle('', false);
    temporary_output_toggle('', false);
    temporary_input_toggle('', false);
    output_text_toggle('', false);
    output_stderr_toggle('', false);
    output_error_toggle('', false);
    if (target_id != "") {
        $(document).ready(function() {
            $(target_id).get(0).scrollIntoView();
        });
    }
}
// Apply all default display settings and style the show/hide control links.
function initialize(){
    // BUG FIX: these calls used `change=false`, which in JavaScript assigns
    // a GLOBAL variable named `change` (and throws in strict mode) instead
    // of naming the argument; pass plain `false`.
    code_toggle('', false);
    dev_codes_toggle('', false);
    temporary_output_toggle('', false);
    temporary_input_toggle('', false);
    output_text_toggle('', false);
    output_stderr_toggle('', false);
    output_error_toggle('', false);
    output_report_or_work_toggle('', false);
    // Select the control links once and set all three properties in one
    // call, instead of repeating the long selector for each property.
    $(".showhide, .showcode, .showdev_codes, .hidedev_codes, .showtemporary_output, .hidetemporary_output, .showtemporary_input, .hidetemporary_input, .hidecode, .showoutput_text, .hideoutput_text, .showoutput_stderr, .hideoutput_stderr, .showoutput_error, .hideoutput_error, .showoutput_work_view, .showoutput_report_view").css({
        'font-style': 'italic',
        'text-align': 'right',
        'font-size': '80%'
    });
}
// Apply the default display settings once the notebook DOM is ready.
$(document).ready(initialize);
</script>