diff --git a/MAB_for_BCI_1/MAB_2.pdf b/MAB_for_BCI_1/MAB_2.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..95c593e558e11c02db1a2e9a9b1e6bef4e843f76
Binary files /dev/null and b/MAB_for_BCI_1/MAB_2.pdf differ
diff --git a/MAB_for_BCI_1/MAB_2.png b/MAB_for_BCI_1/MAB_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..789cf227a02b4020bf01676fc6fd98ac96e3c4ad
Binary files /dev/null and b/MAB_for_BCI_1/MAB_2.png differ
diff --git a/MAB_for_BCI_1/MAB_3.pdf b/MAB_for_BCI_1/MAB_3.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..d680f6398b9a17f8af31267360cc9e0432bf63e3
Binary files /dev/null and b/MAB_for_BCI_1/MAB_3.pdf differ
diff --git a/MAB_for_BCI_1/MAB_3.png b/MAB_for_BCI_1/MAB_3.png
new file mode 100644
index 0000000000000000000000000000000000000000..d962f3a58d4834e5f92b0bc98aae2f0599534c97
Binary files /dev/null and b/MAB_for_BCI_1/MAB_3.png differ
diff --git a/MAB_for_BCI_1/MAB_for_BCI_1.ipynb b/MAB_for_BCI_1/MAB_for_BCI_1.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..44e8461518110f4410658cef5e1ac818b37afac4
--- /dev/null
+++ b/MAB_for_BCI_1/MAB_for_BCI_1.ipynb
@@ -0,0 +1,635 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "19b05592",
+   "metadata": {},
+   "source": [
+    "# Example of Multi-armed Bandits in Brain Computer Interfaces"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "eba47bf9",
+   "metadata": {},
+   "source": [
+    "The example aims to provide a starting point for other researchers wanting to use multi-armed bandits in BCI systems. \n",
+    "\n",
+    "The example uses a data set with four motor imagery classes. \n",
+    "The aim is to find the two most easily distinguishable classes while reducing the needed training data to find these two classes. \n",
+    "The two classes could be used as 'yes' and 'no' in a BCI setting. \n",
+    "In other words, this example showcases how multi-armed bandits can be used during calibration to find suitable classes for a BCI system, similar to the ''One button BCI'' system in [1].\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "8c6a0f6f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "# Moabb\n",
+    "from moabb.datasets import BNCI2014001\n",
+    "from moabb.paradigms import MotorImagery\n",
+    "\n",
+    "# Pyrieman\n",
+    "from pyriemann.classification import TSclassifier\n",
+    "from pyriemann.estimation import Covariances\n",
+    "\n",
+    "\n",
+    "# Sklearn\n",
+    "from sklearn.linear_model import LogisticRegression\n",
+    "from sklearn.pipeline import make_pipeline\n",
+    "from sklearn.model_selection import train_test_split, ShuffleSplit\n",
+    "\n",
+    "\n",
+    "\n",
+    "# Other imports\n",
+    "import numpy as np\n",
+    "from numpy.random import default_rng\n",
+    "from scipy.stats import beta\n",
+    "import matplotlib as mpl\n",
+    "import matplotlib.pyplot as plt\n",
+    "from itertools import combinations\n",
+    "from collections import Counter\n",
+    "import pickle\n",
+    "%matplotlib inline\n",
+    "plt.rcParams['font.family'] = \"Arial\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "17325a23",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Helper funcitons\n",
+    "rnd = default_rng(seed=42)\n",
+    "\n",
+    "def rnd_argmin(array):\n",
+    "    a = array + rnd.random(len(array))\n",
+    "    return np.argmin(a)\n",
+    "\n",
+    "def rnd_argmax(array):\n",
+    "    a = array + rnd.random(len(array))\n",
+    "    return np.argmax(a)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "d23da531",
+   "metadata": {},
+   "source": [
+    "## Data and feature extraction.\n",
+    "The used data set is from data set 2a in BCI competition 4 [2], downloaded with MOABB [3]. \n",
+    " "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "6b3f2f38",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def load_data(subj):\n",
+    "    fmin, fmax = 4, 32\n",
+    "    dataset = BNCI2014001() # BCI competition 4 dataset 2a\n",
+    "    raw = dataset.get_data(subjects=[1])[1]['session_T']['run_1'] \n",
+    "\n",
+    "    channels = raw.pick_types(eeg=True).ch_names\n",
+    "    sfreq = 250.\n",
+    "\n",
+    "    prgm = MotorImagery(n_classes=4, channels=channels, resample=sfreq, fmin=fmin, fmax=fmax)\n",
+    "\n",
+    "    X, labels, meta = prgm.get_data(dataset=dataset, subjects=[subj]) # Get data for specific subject. \n",
+    "    meta['groups'] = meta['session'] +'_' + meta['run']\n",
+    "\n",
+    "\n",
+    "    def relabel(l):\n",
+    "        if   l == 'left_hand' or l =='rest' : return 0\n",
+    "        elif l == 'right_hand'              : return 1\n",
+    "        elif l == 'feet'                    : return 2\n",
+    "        elif l == 'tongue'or l=='hands'     : return 3\n",
+    "    y = np.array([relabel(l) for l in labels])\n",
+    "\n",
+    "    return X,y,meta, labels"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "cc111b81",
+   "metadata": {},
+   "source": [
+    "Features are extracted as covariance matrices and classified with a tangent space logistic regression classifier using the pyRieman package [4]."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "df2f2cca",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model=make_pipeline(Covariances(),TSclassifier(clf=LogisticRegression(max_iter=1000,random_state=42)))\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9db2b57a",
+   "metadata": {},
+   "source": [
+    "## Actions in Multi-Armed Bandit formulation\n",
+    "Each action has data from two classes and trains a classifier to distinguish between these two. \n",
+    "In the beginning, only one data sample from each class is available to the classifier (the classifier has validation data available at all times). \n",
+    "When the agent chooses an action, the corresponding classifier gets one more training data sample and retrains itself. \n",
+    "This is to simulate a BCI system's calibration process where new data successively gets available. \n",
+    "If the classification accuracy on the validation data is better after training than the previously best classification accuracy or if the classification accuracy is above 90\\% the agent gets reward 1, otherwise 0. \n",
+    "Thus, if the agent finds an action that gives 1 in reward, either the classifier is improving or has already reached a magic limit for good classification accuracy (90\\% in this example). "
+   ]
+  },
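+  {
+   "cell_type": "markdown",
+   "id": "rewardrule01",
+   "metadata": {},
+   "source": [
+    "Written out (a restatement of the rule above, as implemented in `use_action` below): with $\\mathrm{acc}_t$ the validation accuracy of the chosen action's classifier after retraining at step $t$, and $\\mathrm{best}_{t-1}$ the best validation accuracy that classifier has reached before step $t$, the reward is\n",
+    "\n",
+    "$$ r_t = \\begin{cases} 1 & \\text{if } \\mathrm{acc}_t \\ge \\mathrm{best}_{t-1} \\text{ or } \\mathrm{acc}_t \\ge 0.9, \\\\\\\\ 0 & \\text{otherwise.} \\end{cases} $$"
+   ]
+  },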
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "e639ef9e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class Action(object):\n",
+    "    \"\"\"Class corresponding to actions in Multi-Armed Bandit formulation\"\"\"\n",
+    "    def __init__(self, x_data, y_data, name,model):\n",
+    "        super(Action, self).__init__()\n",
+    "\n",
+    "        self.wins = 1 # nbr of wins\n",
+    "        self.losses = 1 # nbr of losses\n",
+    "        # split data to validation and training data\n",
+    "        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(x_data, y_data, \n",
+    "                                                                                test_size=0.2,random_state=42)\n",
+    "        self.nbr_uses = 1 # nbr of times action used\n",
+    "        self.name = name # name of action\n",
+    "        self.model = model # model for classification\n",
+    "        self.best_accuracy = 0 # current classification accuracy.\n",
+    "        self.indices_training = np.zeros(len(self.y_train)) # Indices for data to use in training. \n",
+    "\n",
+    "        self._initiate()\n",
+    "\n",
+    "\n",
+    "    def _initiate(self):\n",
+    "        '''\n",
+    "        Initialize classifier and available data. \n",
+    "        '''\n",
+    "        self.indices_training = np.zeros(len(self.y_train)) # Indices for data to use in training.\n",
+    "        values, _ = np.unique(self.y_train, return_index=True) # What classes exist?\n",
+    "        for v in values:\n",
+    "            index = rnd_argmax(self.y_train == v) # Pick a random sample from each class \n",
+    "            self.indices_training[index] = 1 # Make this sample available for classification.\n",
+    "        self.model.fit(self.X_train[self.indices_training==1], self.y_train[self.indices_training==1]) # Train classifier with the available data (one sample from each class.)\n",
+    "        self.best_accuracy = self.model.score(self.X_test,self.y_test) # Store the current best accuracy.\n",
+    "\n",
+    "    def use_action(self):\n",
+    "        '''\n",
+    "        Use action.\n",
+    "        \n",
+    "        Returns:\n",
+    "            reward (int): 0 or 1. 1 if accuracy is better than ever before or greater than 0.9, else 0.\n",
+    "        '''\n",
+    "        self.nbr_uses += 1 # Count times action has been used. \n",
+    "        new_data_indix = rnd_argmin(self.indices_training) # Pick a random data point (not yet picked)\n",
+    "        self.indices_training[new_data_indix] = 1 # Add data point to avialiable data for classificaiton\n",
+    "\n",
+    "        # update model. \n",
+    "        self.model.fit(self.X_train[self.indices_training==1], self.y_train[self.indices_training==1]) # Train model with available data.\n",
+    "        new_accuracy = self.model.score(self.X_test,self.y_test) # Calculate accuracy.\n",
+    "        \n",
+    "        reward=1 if new_accuracy>=self.best_accuracy or new_accuracy>=0.9 else 0 # Reward for action. 1 if accuracy is better than ever before or greater than 0.9\n",
+    "\n",
+    "        if reward == 1: # Update beta distribution for success rate estimation.\n",
+    "            self.wins += 1\n",
+    "        else:\n",
+    "            self.losses += 1\n",
+    "\n",
+    "        if new_accuracy >= self.best_accuracy: # Update best accuracy.\n",
+    "            self.best_accuracy=new_accuracy \n",
+    "\n",
+    "        return reward\n",
+    "\n",
+    "    def sample_beta(self):\n",
+    "        '''\n",
+    "        sample win rate from beta distribution\n",
+    "        '''\n",
+    "        return beta.rvs(self.wins, self.losses, size=1)\n",
+    "\n",
+    "    def get_ucb_bound(self,step):\n",
+    "        '''\n",
+    "        Get upper bound for UCB algorithm.\n",
+    "        '''\n",
+    "        return self.wins/(self.wins+self.losses) + np.sqrt(2*np.log(step)/self.nbr_uses)\n",
+    "\n",
+    "    def ml_model(self):\n",
+    "        '''\n",
+    "        Return machine learning model for classification. \n",
+    "        '''\n",
+    "        return self.model\n"
+   ]
+  },
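+  {
+   "cell_type": "markdown",
+   "id": "policyformulas01",
+   "metadata": {},
+   "source": [
+    "For reference, the quantities computed by `sample_beta` and `get_ucb_bound` above are the standard Bernoulli-bandit quantities. With $w_a$ and $l_a$ the win and loss counters of action $a$ (both start at 1, corresponding to a uniform prior), $n_a$ the number of times the action has been used, and $t$ the current time step:\n",
+    "\n",
+    "* Thompson sampling draws $\\hat{\\mu}_a \\sim \\mathrm{Beta}(w_a, l_a)$ for every action and chooses the action with the largest sample.\n",
+    "* The UCB algorithm chooses the action with the largest index $\\frac{w_a}{w_a + l_a} + \\sqrt{\\frac{2 \\ln t}{n_a}}$.\n",
+    "\n",
+    "These selection rules are used by the `run_trials` function below."
+   ]
+  },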
+  {
+   "cell_type": "markdown",
+   "id": "0538ddc0",
+   "metadata": {},
+   "source": [
+    "## Run trial\n",
+    "One run has a time horizon of 350 steps, meaning that the agent chooses an action 350 times. \n",
+    "The random policy, Thompson sampling, UCB algorithm are implemented and compared. \n",
+    "The random policy can be interpreted as uniform data collection during the calibration phase, meaning that data from all classes are collected, and then all possible class combinations are tested. \n",
+    "With Thompson sampling and the UCB algorithm, calibration data is selected to fit the best action, which reduces unnecessary data collection.\n",
+    "\n",
+    "A few runs with different data split into training data, validation data, and test data are averaged to get the results. \n",
+    "Training data is used for training, validation data for giving rewards to the agent, and the test data to verify the classification accuracy."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "29d57741",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def run_trials(x_data, y_data,labels,policy='random', time_horizon=50,repeats=2):\n",
+    "    '''\n",
+    "    Run repeated trials of MAB with one policy.\n",
+    "    '''\n",
+    "    # Save some data for testing.\n",
+    "    rs = ShuffleSplit(n_splits=repeats, test_size=.2, random_state=42) # Split data to test, train set. \n",
+    "    \n",
+    "    # Placeholders for results\n",
+    "    rewards = np.zeros((repeats,time_horizon))\n",
+    "    accuracys = np.zeros((repeats,time_horizon))\n",
+    "    chosen_actions = np.empty((repeats,time_horizon), dtype=object)\n",
+    "    iteration = 0 # Counter for number of repeats. \n",
+    "    \n",
+    "    # Get a list of all combinations of classes\n",
+    "    class_combinations = []\n",
+    "    for c in combinations(np.unique(labels), 2): # the int says how many classes to combine, in this case two of 'left_hand','right_hand', 'tongue', and 'feet'\n",
+    "        class_combinations.append('_x_'.join(c)) # e.g. 'right_hand_x_left_hand'\n",
+    "\n",
+    "    for train_index, test_index in rs.split(labels):\n",
+    "        \n",
+    "        # Split data to training and test data.\n",
+    "        x_train = x_data[train_index]\n",
+    "        y_train = y_data[train_index]\n",
+    "        labels_train = labels[train_index]\n",
+    "        x_test = x_data[test_index]\n",
+    "        y_test = y_data[test_index]\n",
+    "        labels_test = labels[test_index]\n",
+    "\n",
+    "        # Create list of actions\n",
+    "        actions = []\n",
+    "        for c in class_combinations:\n",
+    "            classes = c.split('_x_') # get list of classes e.g. ['right_hand','left_hand']\n",
+    "            actions.append(Action(x_data=x_train[(labels_train==classes[0]) | (labels_train==classes[1])],\n",
+    "                                y_data=y_train[(labels_train==classes[0]) | (labels_train==classes[1])],\n",
+    "                                name=c, \n",
+    "                                model=make_pipeline(Covariances(),TSclassifier(clf=LogisticRegression(max_iter=1000,random_state=42)))))\n",
+    "\n",
+    "\n",
+    "        # One round of actions\n",
+    "        print(f'Start of loop {iteration+1}/{repeats}')\n",
+    "        for t in range(time_horizon):\n",
+    "            \n",
+    "            # Pick next action\n",
+    "            if policy == 'random':\n",
+    "                action_index = rnd.integers(0,len(actions),1)[0]\n",
+    "            elif policy == 'thompson':\n",
+    "                est_win_rate = np.zeros(len(actions))\n",
+    "                for i in range(len(actions)):\n",
+    "                    est_win_rate[i] = actions[i].sample_beta()\n",
+    "                action_index = rnd_argmax(est_win_rate)\n",
+    "            elif policy == 'ucb':\n",
+    "                est_mean_ucb = np.zeros(len(actions))\n",
+    "                for i in range(len(actions)):\n",
+    "                    est_mean_ucb[i] = actions[i].get_ucb_bound(t+1)\n",
+    "                action_index = rnd_argmax(est_mean_ucb)\n",
+    "\n",
+    "            # Use that action and store reward\n",
+    "            rewards[iteration,t] = actions[action_index].use_action()\n",
+    "            # Calculate accuracy of classifier on test data. \n",
+    "            accuracys[iteration,t] = actions[action_index].ml_model().score(x_test[(labels_test==classes[0]) | (labels_test==classes[1])],y_test[(labels_test==classes[0]) | (labels_test==classes[1])])\n",
+    "            # Store what action was chosen.\n",
+    "            chosen_actions[iteration,t] = actions[action_index].name\n",
+    "        iteration += 1\n",
+    "\n",
+    "        # delete action objects to save space. \n",
+    "        actions.clear() \n",
+    "        del actions[:]\n",
+    "        del actions\n",
+    "    return rewards, accuracys,chosen_actions"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "17c2747d",
+   "metadata": {},
+   "source": [
+    "Run for different policies."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "80cbd817",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Choosing from all possible events\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Data loaded\n",
+      "Thompson policy\n",
+      "Start of loop 1/5\n"
+     ]
+    }
+   ],
+   "source": [
+    "X,y,meta,labels = load_data(1)\n",
+    "print('Data loaded')\n",
+    "\n",
+    "\n",
+    "time_horizon = 350\n",
+    "repeats = 5\n",
+    "print('Thompson policy')\n",
+    "rewards_t, accuracys_t,actions_t = run_trials(X,y,labels,'thompson',time_horizon,repeats)\n",
+    "print('Random policy')\n",
+    "rewards_r, accuracys_r,actions_r = run_trials(X,y,labels,'random',time_horizon,repeats)\n",
+    "print('UCB policy')\n",
+    "rewards_u, accuracys_u,actions_u = run_trials(X,y,labels,'ucb',time_horizon,repeats)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e391a690",
+   "metadata": {},
+   "source": [
+    "Save data from run in case one wants to replot at a later point. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c7834f0f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "    # Save data in case!!!\n",
+    "    with open(\"rewards_t\", \"wb\") as file:   \n",
+    "        pickle.dump(rewards_t, file)\n",
+    "    with open(\"accuracys_t\", \"wb\") as file:   \n",
+    "        pickle.dump(accuracys_t, file)\n",
+    "    with open(\"actions_t\", \"wb\") as file:   \n",
+    "        pickle.dump(actions_t, file)\n",
+    "\n",
+    "    with open(\"rewards_r\", \"wb\") as file:   \n",
+    "        pickle.dump(rewards_r, file)\n",
+    "    with open(\"accuracys_r\", \"wb\") as file:   \n",
+    "        pickle.dump(accuracys_r, file)\n",
+    "    with open(\"actions_r\", \"wb\") as file:   \n",
+    "        pickle.dump(actions_r, file)\n",
+    "\n",
+    "    with open(\"rewards_u\", \"wb\") as file:   \n",
+    "        pickle.dump(rewards_u, file)\n",
+    "    with open(\"accuracys_u\", \"wb\") as file:   \n",
+    "        pickle.dump(accuracys_u, file)\n",
+    "    with open(\"actions_u\", \"wb\") as file:   \n",
+    "        pickle.dump(actions_u, file)\n",
+    "\n",
+    "    # Load data\n",
+    "    \n",
+    "    # with open(\"rewards_t\", \"rb\") as file:   \n",
+    "    #     rewards_t = pickle.load(file)\n",
+    "    # with open(\"accuracys_t\", \"rb\") as file:   \n",
+    "    #     accuracys_t = pickle.load(file)\n",
+    "    # with open(\"actions_t\", \"rb\") as file:   \n",
+    "    #     actions_t = pickle.load(file)\n",
+    "\n",
+    "    # with open(\"rewards_r\", \"rb\") as file:   \n",
+    "    #     rewards_r = pickle.load(file)\n",
+    "    # with open(\"accuracys_r\", \"rb\") as file:   \n",
+    "    #     accuracys_r = pickle.load(file)\n",
+    "    # with open(\"actions_r\", \"rb\") as file:   \n",
+    "    #     actions_r = pickle.load(file)\n",
+    "\n",
+    "    # with open(\"rewards_u\", \"rb\") as file:   \n",
+    "    #     rewards_u = pickle.load(file)\n",
+    "    # with open(\"accuracys_u\", \"rb\") as file:   \n",
+    "    #     accuracys_u = pickle.load(file)\n",
+    "    # with open(\"actions_u\", \"rb\") as file:   \n",
+    "    #     actions_u = pickle.load(file)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "edbe1c27",
+   "metadata": {},
+   "source": [
+    "## Results and Discussion\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f7564b5d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "steps=5\n",
+    "fig, ax = plt.subplots(2,1,dpi=100,figsize=(4.8,4.8))\n",
+    "a = np.mean(rewards_r,axis=0)\n",
+    "ax[0].plot(range(0,time_horizon,steps),[sum(a[i:i+steps])/len(a[i:i+steps]) for i in range(0,len(a),steps)],'k:',label='Random')\n",
+    "a = np.mean(accuracys_r,axis=0)\n",
+    "ax[1].plot(range(0,time_horizon,steps),[sum(a[i:i+steps])/len(a[i:i+steps]) for i in range(0,len(a),steps)],'k:',label='Random')\n",
+    "\n",
+    "a = np.mean(rewards_t,axis=0)\n",
+    "ax[0].plot(range(0,time_horizon,steps),[sum(a[i:i+steps])/len(a[i:i+steps]) for i in range(0,len(a),steps)],'k-',label='Thompson')\n",
+    "a = np.mean(accuracys_t,axis=0)\n",
+    "ax[1].plot(range(0,time_horizon,steps),[sum(a[i:i+steps])/len(a[i:i+steps]) for i in range(0,len(a),steps)],'k-',label='Thompson')\n",
+    "\n",
+    "a = np.mean(rewards_u,axis=0)\n",
+    "ax[0].plot(range(0,time_horizon,steps),[sum(a[i:i+steps])/len(a[i:i+steps]) for i in range(0,len(a),steps)],'k--',label='UCB')\n",
+    "a = np.mean(accuracys_u,axis=0)\n",
+    "ax[1].plot(range(0,time_horizon,steps),[sum(a[i:i+steps])/len(a[i:i+steps]) for i in range(0,len(a),steps)],'k--',label='UCB')\n",
+    "\n",
+    "ax[1].plot([0,time_horizon],[0.5,0.5], 'k-.',label='Chance level')\n",
+    "\n",
+    "ax[0].legend(loc='lower right')\n",
+    "ax[0].set_xlabel(r'Time step')\n",
+    "ax[0].set_ylabel(r'Average reward')\n",
+    "ax[0].set_title(r'Reward')\n",
+    "ax[0].set_ylim([0,1])\n",
+    "\n",
+    "ax[1].legend(loc='lower right')\n",
+    "ax[1].set_xlabel(r'Time step')\n",
+    "ax[1].set_ylabel(r'Average accuracy for chosen action')\n",
+    "ax[1].set_title(r'Accuracy')\n",
+    "ax[1].set_ylim([0.47,1])\n",
+    "ax[1].set_yticks(np.arange(0.5, 1.01, step=0.1))\n",
+    "\n",
+    "plt.tight_layout()\n",
+    "plt.savefig('MAB_2.png')\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9f14ea65",
+   "metadata": {},
+   "source": [
+    "The figure above shows the average reward per time step. \n",
+    "The Thompson sampling and the UCB algorithm find an action that gives a good reward in each step, while the random action does not find a good action. \n",
+    "Finding a good action means that the corresponding two classes are easily distinguishable. \n",
+    "\n",
+    "\n",
+    "The figure also shows the average accuracy on test data for the action the agent chose. \n",
+    "We see that the Thompson sampling and UCB algorithm, on average, choose actions with higher accuracy than the random policy, indicating that using multi-armed bandits to find distinguishable classes is a good approach.  \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bf271392",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.figure(dpi=100,figsize=(4.8,4.8))\n",
+    "actions = np.unique(actions_r[0]) # Get all possible classes\n",
+    "actions.sort()\n",
+    "# Random\n",
+    "c = Counter()\n",
+    "for i in range(len(actions_r)):\n",
+    "    c.update(actions_r[i])\n",
+    "c = dict(c)\n",
+    "actions_count = np.zeros(len(actions))\n",
+    "for i in range(len(actions_count)):\n",
+    "    actions_count[i] = c.get(actions[i],0)\n",
+    "plt.bar(x = np.array(range(len(actions))),\n",
+    "        height = actions_count/repeats/time_horizon,\n",
+    "        width=0.2, \n",
+    "        tick_label = actions,\n",
+    "        label='Random',\n",
+    "        color='black',\n",
+    "        alpha=0.2)\n",
+    "# Thompson\n",
+    "c = Counter()\n",
+    "for i in range(len(actions_t)):\n",
+    "    c.update(actions_t[i])\n",
+    "c = dict(c)\n",
+    "actions_count = np.zeros(len(actions))\n",
+    "for i in range(len(actions_count)):\n",
+    "    actions_count[i] = c.get(actions[i],0)\n",
+    "plt.bar(x = np.array(range(len(actions)))+0.25,\n",
+    "        height = actions_count/repeats/time_horizon, \n",
+    "        tick_label = actions,\n",
+    "        width=0.2,\n",
+    "        label='Thompson',\n",
+    "        color='black',\n",
+    "        alpha=0.5)\n",
+    "# UCB\n",
+    "c = Counter()\n",
+    "for i in range(len(actions_u)):\n",
+    "    c.update(actions_u[i])\n",
+    "c = dict(c)\n",
+    "actions_count = np.zeros(len(actions))\n",
+    "for i in range(len(actions_count)):\n",
+    "    actions_count[i] = c.get(actions[i],0)\n",
+    "plt.bar(x = np.array(range(len(actions)))+0.5,\n",
+    "        height = actions_count/repeats/time_horizon, \n",
+    "        tick_label = actions,\n",
+    "        width=0.2,\n",
+    "        label='UCB',\n",
+    "        color='black',\n",
+    "        alpha=0.7)\n",
+    "\n",
+    "plt.xticks(rotation=90)\n",
+    "plt.ylabel('Ratio of times action chosen')\n",
+    "plt.legend()\n",
+    "\n",
+    "plt.tight_layout()\n",
+    "plt.savefig('MAB_3.png')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "77cbf21e",
+   "metadata": {},
+   "source": [
+    "\n",
+    "The figure above shows the ratio of times each action was chosen, averaged over ten runs. \n",
+    "The random policy chooses all actions as often, while both the Thompson sampling and UCB seem to prefer the right hand vs. tongue action. \n",
+    "Thus, it is probable that the right hand vs. tongue are most easily distinguishable. \n",
+    "Even though the right hand vs. tongue seem to be best, it does not necessarily mean that these two classes would have been best if all data were available. \n",
+    "However, in this setting with an increasing amount of data, the classification accuracy often improved with new data or reached above 90\\% fast, which meant that the agent got reward 1 often when choosing this action.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "53d63333",
+   "metadata": {},
+   "source": [
+    "\n",
+    "## Conclusion\n",
+    "The purpose of this example was to provide code for multi-armed bandits on BCI data. \n",
+    "The reader can, for example, use this code to implement their algorithms for this problem and compare with the random policy, Thompson sampling, and UCB algorithm. \n",
+    "Alternatively,  the reader can use this as a starting point for other BCI systems with multi-armed bandits."
+   ]
+  },
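+  {
+   "cell_type": "markdown",
+   "id": "ownpolicy01",
+   "metadata": {},
+   "source": [
+    "As a minimal sketch of how a new policy could be plugged in, the hypothetical helper below implements an epsilon-greedy choice using the `wins`/`losses` counters of the `Action` objects (the name `epsilon_greedy_choice` and the value of `epsilon` are not part of the original example). It could be called in place of the policy branches inside `run_trials`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ownpolicy02",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def epsilon_greedy_choice(actions, epsilon=0.1):\n",
+    "    '''\n",
+    "    Sketch of a hypothetical epsilon-greedy policy: with probability epsilon pick a random action,\n",
+    "    otherwise pick the action with the highest empirical win rate so far.\n",
+    "    '''\n",
+    "    if rnd.random() < epsilon: # Explore: pick a random action.\n",
+    "        return rnd.integers(0, len(actions), 1)[0]\n",
+    "    est_win_rate = np.array([a.wins / (a.wins + a.losses) for a in actions]) # Empirical win rates.\n",
+    "    return rnd_argmax(est_win_rate) # Exploit: pick the action with the highest estimated win rate."
+   ]
+  },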
+  {
+   "cell_type": "markdown",
+   "id": "49793af6",
+   "metadata": {},
+   "source": [
+    "## References\n",
+    "\n",
+    "[1] Fruitet, J., Carpentier, A., Munos, R., and Clerc, M. (2012). Bandit Algorithms boost Brain Computer359\n",
+    "Interfaces for motor-task selection of a brain-controlled button. In _Advances in Neural Information360\n",
+    "Processing Systems_, eds. P. Bartlett, F. C. N. Pereira, C. J. C. Burges, L. Bottou, and K. Q. Weinberger361\n",
+    "(Lake Tahoe, Nevada, United States: Neural Information Processing Systems (NIPS) Foundation),362\n",
+    "vol. 25, 458–466\n",
+    "\n",
+    "[2] Tangermann, M., Müller, K.-R., Aertsen, A., Birbaumer, N., Braun, C., Brunner, C., Leeb, R., Mehring, C., Miller, K., Mueller-Putz, G., Nolte, G., Pfurtscheller, G., Preissl, H., Schalk, G., Schlögl, A., Vidaurre, C., Waldert, S., Blankertz, B., 2012. Review of the BCI Competition IV. _Frontiers in Neuroscience_ 6, 55. https://doi.org/10.3389/fnins.2012.00055\n",
+    "\n",
+    "\n",
+    "[3] Jayaram, V., Barachant, A., 2018. MOABB: trustworthy algorithm benchmarking for BCIs. _J. Neural Eng._ 15, 066011. https://doi.org/10.1088/1741-2552/aadea0\n",
+    "\n",
+    "[4] pyRiemann Contributors, n.d. pyRiemann: Biosignals classification with Riemannian Geometry. URL: https://pyriemann.readthedocs.io/en/latest/index.html (accessed 3.9.22).\n",
+    "\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/MAB_for_BCI_1/README.md b/MAB_for_BCI_1/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..61022b8fe12118fde95fb41f449d34e9b2d11123
--- /dev/null
+++ b/MAB_for_BCI_1/README.md
@@ -0,0 +1,27 @@
+# MAB for BCI - example 1
+
+## Aim of example
+The purpose of this example is to showcase how multi-armed bandits can be used in BCI systems. The reader is welcome to use this code as a starting point for other projects.
+
+## Overview
+The example uses a data set with four motor imagery classes. 
+The aim is to find the two most easily distinguishable classes while reducing the amount of training data needed to find them. 
+The two classes could be used as 'yes' and 'no' in a BCI setting. 
+In other words, this example showcases how multi-armed bandits can be used during calibration to find suitable classes for a BCI system, similar to the 'One button BCI' system in [1].
+
+See more details in the [`MAB_for_BCI_1.ipynb`](MAB_for_BCI_1.ipynb) file.
+
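+## Requirements
+The notebook uses MOABB, pyRiemann, scikit-learn, NumPy, SciPy, and Matplotlib (see the imports in the first code cell). A minimal environment sketch, without version pins, might look like:
+
+```
+pip install moabb pyriemann scikit-learn numpy scipy matplotlib
+```
+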
+## Output from example
+The following two images are generated by the example. The first compares the policies in terms of received reward and classification accuracy at each time step. The second shows how often each action was chosen by each policy.
+
+![](MAB_2.png)
+
+![](MAB_3.png)
+
+
+## References
+[1] Fruitet, J., Carpentier, A., Munos, R., and Clerc, M. (2012). Bandit Algorithms boost Brain Computer Interfaces for motor-task selection of a brain-controlled button. In _Advances in Neural Information Processing Systems_, eds. P. Bartlett, F. C. N. Pereira, C. J. C. Burges, L. Bottou, and K. Q. Weinberger (Lake Tahoe, Nevada, United States: Neural Information Processing Systems (NIPS) Foundation), vol. 25, 458–466
\ No newline at end of file
diff --git a/MAB_for_BCI_1/accuracys_r b/MAB_for_BCI_1/accuracys_r
new file mode 100644
index 0000000000000000000000000000000000000000..b1e54a59cc1a1d5303adff079f3897761286b150
Binary files /dev/null and b/MAB_for_BCI_1/accuracys_r differ
diff --git a/MAB_for_BCI_1/accuracys_t b/MAB_for_BCI_1/accuracys_t
new file mode 100644
index 0000000000000000000000000000000000000000..c803228097a25ebeca165dd3d58463fc80aebf8f
Binary files /dev/null and b/MAB_for_BCI_1/accuracys_t differ
diff --git a/MAB_for_BCI_1/accuracys_u b/MAB_for_BCI_1/accuracys_u
new file mode 100644
index 0000000000000000000000000000000000000000..82174c566cd88bd6cf83b51280d6d0ab64969a1d
Binary files /dev/null and b/MAB_for_BCI_1/accuracys_u differ
diff --git a/MAB_for_BCI_1/actions_r b/MAB_for_BCI_1/actions_r
new file mode 100644
index 0000000000000000000000000000000000000000..94a116b17973fcf6148eb1020b7fc2d685ce950a
Binary files /dev/null and b/MAB_for_BCI_1/actions_r differ
diff --git a/MAB_for_BCI_1/actions_t b/MAB_for_BCI_1/actions_t
new file mode 100644
index 0000000000000000000000000000000000000000..22afa6bb3b4d828a402f4cde382e9f047f29aad9
Binary files /dev/null and b/MAB_for_BCI_1/actions_t differ
diff --git a/MAB_for_BCI_1/actions_u b/MAB_for_BCI_1/actions_u
new file mode 100644
index 0000000000000000000000000000000000000000..c31b556f3df8a332221abc1ce5f3c63875029f3a
Binary files /dev/null and b/MAB_for_BCI_1/actions_u differ
diff --git a/MAB_for_BCI_1/rewards_r b/MAB_for_BCI_1/rewards_r
new file mode 100644
index 0000000000000000000000000000000000000000..3d018101ff87e45f537eb723a1a46cddd44e1d88
Binary files /dev/null and b/MAB_for_BCI_1/rewards_r differ
diff --git a/MAB_for_BCI_1/rewards_t b/MAB_for_BCI_1/rewards_t
new file mode 100644
index 0000000000000000000000000000000000000000..32c5d12c8ba090640a06929f34e3441d5a581b53
Binary files /dev/null and b/MAB_for_BCI_1/rewards_t differ
diff --git a/MAB_for_BCI_1/rewards_u b/MAB_for_BCI_1/rewards_u
new file mode 100644
index 0000000000000000000000000000000000000000..fb8ddc8c46eaa1798515695e6f64695209852252
Binary files /dev/null and b/MAB_for_BCI_1/rewards_u differ