{
"cells": [
{
"cell_type": "markdown",
"id": "32647951-7dcf-4553-b42a-5d8508d95cf9",
"metadata": {},
"source": [
"
"
]
},
{
"cell_type": "markdown",
"id": "38b445ff-7cf7-442e-bb25-83afe6d5a3fb",
"metadata": {},
"source": [
"\n",
"# Sensory Coding: Cockroach mechanoreceptors\n",
"\n",
"Cockroach mechanoreceptor response properties. \n",
"\n",
"Cluster-based rate analysis of potential multi-unit activity.\n",
"\n",
"Trial-averaged spiking responses. \n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "markdown",
"id": "f07451ff-cf56-4763-b4ac-bd099ba1f145",
"metadata": {},
"source": [
"\n",
"# Table of Contents\n",
"\n",
"- [Introduction](#intro)\n",
"- [Setup](#setup)\n",
"- [Part I. Load Data](#one)\n",
"- [Part II. Event Detection](#two)\n",
"- [Part III. Event Clustering](#three)\n",
"- [Part IV. Clustered events and instantaneous rate](#four)\n",
"- [Part V. Trial-Averaged Analysis](#five)"
]
},
{
"cell_type": "markdown",
"id": "488d476b-9387-43d7-b4bf-0528d9f4950a",
"metadata": {},
"source": [
"\n",
"# Setup"
]
},
{
"cell_type": "markdown",
"id": "b94f344a-8c2a-4080-b972-8e7a30c91e66",
"metadata": {},
"source": [
"Import and define functions"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "be70dacf-b862-4afa-8fc0-dce8c363fdf9",
"metadata": {
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title {display-mode: \"form\" }\n",
"\n",
"#@markdown Run this code cell to import packages and define functions \n",
"import numpy as np\n",
"import pandas as pd\n",
"from scipy import ndimage\n",
"from scipy.signal import find_peaks\n",
"from copy import deepcopy\n",
"import math\n",
"from sklearn.decomposition import PCA\n",
"from sklearn.cluster import KMeans\n",
"from pathlib import Path\n",
"import matplotlib.pyplot as plt\n",
"import plotly.express as px\n",
"import plotly.graph_objects as go\n",
"from plotly.subplots import make_subplots\n",
"import csv\n",
"from scipy.signal import hilbert,medfilt,resample\n",
"from sklearn.decomposition import PCA\n",
"import scipy\n",
"import seaborn as sns\n",
"from ipywidgets import interactive, HBox, VBox, widgets, interact\n",
"\n",
"from datetime import datetime,timezone,timedelta\n",
"pal = sns.color_palette(n_colors=15)\n",
"pal = pal.as_hex()\n",
"\n",
"print('Task completed at ' + str(datetime.now(timezone(-timedelta(hours=5)))))"
]
},
{
"cell_type": "markdown",
"id": "f91a09e1-c305-4809-8302-c1fad4140efa",
"metadata": {},
"source": [
"Mount Google Drive"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b3a4500-470b-44e3-8572-a0d55f3bc1cb",
"metadata": {
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title {display-mode: \"form\" }\n",
"\n",
"#@markdown Run this cell to mount your Google Drive. \n",
"\n",
"from google.colab import drive\n",
"drive.mount('/content/drive')\n",
"\n",
"print('Task completed at ' + str(datetime.now(timezone(-timedelta(hours=5)))))"
]
},
{
"cell_type": "markdown",
"id": "61aa1776-ad44-4487-ad14-9dfe050de305",
"metadata": {},
"source": [
"Import data digitized with *Nidaq USB6211* and recorded using *Bonsai-rx* as a *.bin* file"
]
},
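{
"cell_type": "markdown",
"id": "bin-format-note",
"metadata": {},
"source": [
"For reference, here is a minimal sketch (not part of the guided analysis) of how a *.bin* file like this can be read: in this workflow the samples are stored as raw 64-bit floats, and when more than one channel is recorded the channels are interleaved sample-by-sample, so the flat array is reshaped to (samples, channels). The file name and channel count in the sketch are placeholders, not your actual recording."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bin-format-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: read a .bin file of raw float64 samples.\n",
"# 'example_recording.bin' and example_channels are placeholders -- substitute your own values.\n",
"import numpy as np\n",
"from pathlib import Path\n",
"\n",
"example_path = Path('example_recording.bin') # hypothetical file name\n",
"example_channels = 2 # hypothetical channel count\n",
"\n",
"raw = np.fromfile(example_path, dtype=np.float64)\n",
"if example_channels > 1:\n",
"    # channels are interleaved sample-by-sample, so reshape to (samples, channels)\n",
"    raw = raw.reshape(-1, example_channels)\n",
"print(raw.shape)"
]
},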
{
"cell_type": "markdown",
"id": "b7b40cda-55dd-4604-afb7-89e5d0731983",
"metadata": {},
"source": [
"\n",
"# Part I. Load Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af30df99-a760-4190-b459-a1205e7d90fd",
"metadata": {
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title {display-mode: \"form\" }\n",
"\n",
"#@markdown Specify the file path \n",
"#@markdown to your recorded data on Drive (find the filepath in the colab file manager:\n",
"\n",
"filepath = \"full filepath goes here\" #@param \n",
"filepath = \"/Users/kperks/mnt/OneDrive - wesleyan.edu/Teaching/Neurophysiology_FA21/Data/CockroachSensoryPhysiology/20211020_30k/allfeaturesonce_diffspikes_KP2021-10-20T15_18_21.bin\"\n",
"\n",
"#@markdown Specify the sampling rate and number of channels recorded.\n",
"\n",
"# sampling_rate = NaN #@param\n",
"# number_channels = NaN #@param\n",
"sampling_rate = 30000 #@param\n",
"number_channels = 1 #@param\n",
"\n",
"downsample = False #@param\n",
"newfs = 10000 #@param\n",
"\n",
"#@markdown After you have filled out all form fields, \n",
"#@markdown run this code cell to load the data. \n",
"\n",
"filepath = Path(filepath)\n",
"\n",
"# No need to edit below this line\n",
"#################################\n",
"data = np.fromfile(Path(filepath), dtype = np.float64)\n",
"if number_channels>1:\n",
" data = data.reshape(-1,number_channels)\n",
"data = data-np.median(data)\n",
"dur = np.shape(data)[0]/sampling_rate\n",
"print('duration of recording was %0.2f seconds' %dur)\n",
"\n",
"fs = sampling_rate\n",
"if downsample:\n",
" newfs = 2500 #downsample emg data\n",
" chunksize = int(sampling_rate/newfs)\n",
" if number_channels>1:\n",
" data = data[0::chunksize,:]\n",
" if number_channels==1:\n",
" data = data[0::chunksidze]\n",
" fs = int(np.shape(data)[0]/dur)\n",
"\n",
"time = np.linspace(0,dur,np.shape(data)[0])\n",
"\n",
"\n",
"print('Now be a bit patient while it plots.')\n",
"\n",
"f = go.FigureWidget(layout=go.Layout(height=500, width=800))\n",
"if number_channels == 1:\n",
" f.add_trace(go.Scatter(x = time[0:fs], y = data[0:fs],\n",
" name=str(chan),opacity=1))\n",
"if number_channels>1:\n",
" for i,chan in enumerate(range(number_channels)):\n",
" f.add_trace(go.Scatter(x = time[0:fs], y = data[0:fs,chan],\n",
" name=str(chan),opacity=1))\n",
"\n",
"slider = widgets.FloatRangeSlider(\n",
" min=0,\n",
" max=dur,\n",
" value=(0,1),\n",
" step= 1,\n",
" readout=False,\n",
" description='Time')\n",
"slider.layout.width = '800px'\n",
"\n",
"# our function that will modify the xaxis range\n",
"def response(x):\n",
" with f.batch_update():\n",
" starti = int(x[0]*fs)\n",
" stopi = int(x[1]*fs)\n",
" \n",
" if number_channels == 1:\n",
" f.data[0].x = time[starti:stopi]\n",
" f.data[0].y = data[starti:stopi]\n",
" if number_channels > 1:\n",
" for i in range(number_channels):\n",
" f.data[i].x = time[starti:stopi]\n",
" f.data[i].y = data[starti:stopi,i]\n",
"\n",
"vb = VBox((f, interactive(response, x=slider)))\n",
"vb.layout.align_items = 'center'\n",
"vb\n"
]
},
{
"cell_type": "markdown",
"id": "dd727a14-c83f-4bc1-91d7-59cb24049341",
"metadata": {},
"source": [
"\n",
"# Part II. Event Detection"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fe76c659-14d4-4d7d-885a-bc20cfb6eb1f",
"metadata": {
"cellView": "form",
"id": "iE2MylaxTGpL",
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title { display-mode: \"form\" }\n",
"\n",
"#@markdown TASK: Type in the start and stop time (in seconds) \n",
"#@markdown that you want to focus on in the recording.\n",
"# start_time = None#@param {type: \"number\"}\n",
"# stop_time = None #@param {type: \"number\"}\n",
"# #@markdown TASK: Type in an appropriate event threshold amplitude for detection.\n",
"# threshold = None #@param {type: \"number\"}\n",
"# #@markdown TASK: Then from the dropdown, select a polarity (whether peaks are up or down)\n",
"# peaks = \"up\" #@param ['select peak direction','up', 'down']\n",
"# #@markdown TASK: Finally, RUN this cell to set these values.\n",
"\n",
"start_time = 42 #@param {type: \"number\"}\n",
"stop_time = 52 #@param {type: \"number\"}\n",
"#@markdown TASK: Type in an appropriate event threshold amplitude for detection.\n",
"threshold = 0.016 #@param {type: \"number\"}\n",
"#@markdown TASK: Then from the dropdown, select a polarity (whether peaks are up or down)\n",
"peaks = \"down\" #@param ['select peak direction','up', 'down']\n",
"#@markdown TASK: Finally, RUN this cell to set these values.\n",
"\n",
"spike_detection_threshold = threshold\n",
"\n",
"if peaks=='up': polarity = 1\n",
"if peaks=='down': polarity=-1\n",
"\n",
"#@markdown After the values are set, the emg signal will be processed to detect events (peaks).\n",
"#@markdown \"PCA\" (principle component analysis) will be applied to determine \n",
"#@markdown the fundamental waveform shapes across all events.\n",
"#@markdown
You will see a histogram of event peak amplitudes \n",
"#@markdown as well as a plot of waveform PCs (principle components).\n",
"min_isi = 0.001 #seconds\n",
"\n",
"# samples_inwin = samples[int(start_time/sample_rate):int(stop_time/sample_rate)]\n",
"peaks,props = find_peaks(polarity * data,height=spike_detection_threshold, \n",
" prominence = spike_detection_threshold,\n",
" distance=int(min_isi*fs))\n",
"peaks_t = peaks/fs\n",
"inwin_inds = ((peaks_t>start_time) & (peaks_t\n",
"# Part III. Event Clustering"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38cc3762-d544-46d0-a231-493f6c43941c",
"metadata": {
"cellView": "form",
"id": "4e574ce9-d314-4a20-918b-f2496260c9a8",
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title { display-mode: \"form\" }\n",
"\n",
"#@markdown Let's start by clustering the events into putative motor units. { display-mode: \"form\" }\n",
"#@markdown Choose the number of clusters you want to split the data into and type that number below.
\n",
"#@markdown >Note: It can sometimes help to \"over-split\" the events into more clusters \n",
"#@markdown than you think will be necessary. You can try both strategies and assess the results.\n",
"number_of_clusters = None #@param {type: \"number\"}\n",
"#@markdown RUN this cell to cluster events categorically based on waveform shape (in PC space) and amplitude. \n",
"#@markdown
As a result, you will see a plot of the mean waveform from each cluster (with standard deviation shaded)\n",
"\n",
"number_of_clusters = 3 #@param {type: \"number\"}\n",
"\n",
"# No need to edit below this line\n",
"#################################\n",
"\n",
"kmeans = KMeans(n_clusters=number_of_clusters).fit(df_data)\n",
"# df_props['peaks_t'] = peaks_t\n",
"df_props['cluster'] = kmeans.labels_\n",
"\n",
"winsamps = int(windur * fs)\n",
"x = np.linspace(-windur,windur,winsamps*2)*1000\n",
"hfig,ax = plt.subplots(1,figsize=(10,8))\n",
"ax.set_ylabel('Volts recorded')\n",
"ax.set_xlabel('milliseconds')\n",
"\n",
"# fig = go.Figure()\n",
"\n",
"for k in np.unique(df_props['cluster']):\n",
" spkt = df_props.loc[df_props['cluster']==k]['spikeT'].values #['peaks_t'].values\n",
" spkt = spkt[(spkt>windur) & (spkt<(len((data)/fs)-windur))]\n",
" print(str(len(spkt)) + \" spikes in cluster number \" + str(k))\n",
" spkwav = np.asarray([data[(int(t*fs)-winsamps):(int(t*fs)+winsamps)] for t in spkt])\n",
" wav_u = np.mean(spkwav,0)\n",
" wav_std = np.std(spkwav,0)\n",
" # fig.add_trace(go.Scatter(x = x, y = wav_u,line_color=pal[k],name='cluster ' + str(k)),\n",
" # row=1,col=1)\n",
" ax.plot(x,wav_u,linewidth = 3,label='cluster '+ str(k),color=pal[k])\n",
" ax.fill_between(x, wav_u-wav_std, wav_u+wav_std, alpha = 0.25,color=pal[k])\n",
"# fig.update_layout(xaxis_title=\"time(seconds)\", yaxis_title='amplitude',width=500, height=500)\n",
"plt.legend(bbox_to_anchor=[1.25,1]);\n",
"\n",
"print('Tasks completed at ' + str(datetime.now(timezone(-timedelta(hours=5)))))\n"
]
},
{
"cell_type": "markdown",
"id": "29684cba-29ad-4551-bfed-eaf7a201e6df",
"metadata": {
"id": "9iyWsThmXdI_"
},
"source": [
"If there are multiple spike clusters you want to merge into a single cell class, *edit and run* the cell below.\n",
"\n",
"> **merge_cluster_list** = a list of the clusters (identified by numbers associated with the colors specified in the legend above).\n",
" - **For example**, the folowing list would merge clusters 0 and 2 together and 1, 4, and 3 together:
\n",
" **merge_cluster_list = [[0,2],[1,4,3]]**\n",
" - For each merge group, the first cluster number listed will be the re-asigned cluster number for that group (for example, in this case you would end up with a cluster number 0 and a cluster number 1). \n",
" "
]
},
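{
"cell_type": "markdown",
"id": "merge-example-note",
"metadata": {},
"source": [
"A minimal worked example of the merge step (using made-up cluster labels, not your data), showing how each sublist in **merge_cluster_list** is relabeled to the first cluster number in that sublist:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "merge-example-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Toy example (hypothetical labels) of how merge_cluster_list relabeling works.\n",
"import pandas as pd\n",
"\n",
"df_toy = pd.DataFrame({'cluster': [0, 1, 2, 3, 4, 2, 4, 1]}) # made-up labels\n",
"toy_merge_list = [[0,2],[1,4,3]] # merge 0 & 2 into 0; merge 1, 4, 3 into 1\n",
"\n",
"for k_group in toy_merge_list:\n",
"    for k in k_group:\n",
"        df_toy.loc[df_toy['cluster']==k,'cluster'] = k_group[0]\n",
"\n",
"print(df_toy['cluster'].values) # -> [0 1 0 1 1 0 1 1]"
]
},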
{
"cell_type": "code",
"execution_count": null,
"id": "39b4e41a-4e14-485b-bf00-26c6418fc639",
"metadata": {
"cellView": "form",
"id": "EDJgd8DAXRba",
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title { display-mode: \"form\" }\n",
"\n",
"#@markdown ONLY DO THIS TASK IF YOU WANT TO MERGE CLUSTERS. { display-mode: \"form\" }\n",
"#@markdown OTHERWISE, MOVE ON. \n",
"#@markdown
Below, create your list (of sublists) of clusters to merge.\n",
"#@markdown >Just leave out from the list any clusters that you want unmerged.\n",
"merge_cluster_list = [[0,1,2],[4,3]] #@param\n",
"#@markdown Then, RUN the cell to merge clusters as specified.\n",
"\n",
"for k_group in merge_cluster_list:\n",
" for k in k_group:\n",
" df_props.loc[df_props['cluster']==k,'cluster'] = k_group[0]\n",
"print('you now have the following clusters: ' + str(np.unique(df_props['cluster'])))\n",
"\n",
"print('Tasks completed at ' + str(datetime.now(timezone(-timedelta(hours=5)))))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3ad42b1-16e5-4ddd-a11f-6f734664107d",
"metadata": {
"cellView": "form",
"id": "JAux50jqXs8a",
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title { display-mode: \"form\" }\n",
"\n",
"#@markdown Now, RUN this cell to plot the average waveform for your new clusters. { display-mode: \"form\" }\n",
"##@markdown And to plot a color-coded scatter of each detected and categorized emg event.\n",
"winsamps = int(windur * fs)\n",
"x = np.linspace(-windur,windur,winsamps*2)*1000\n",
"hfig,ax = plt.subplots(1,figsize=(8,6))\n",
"ax.set_ylabel('amplitude')\n",
"ax.set_xlabel('milliseconds')\n",
"\n",
"# fig = go.Figure()\n",
"\n",
"for k in np.unique(df_props['cluster']):\n",
" spkt = df_props.loc[df_props['cluster']==k]['spikeT'].values\n",
" spkt = spkt[(spkt>windur) & (spkt<(len((data)/fs)-windur))]\n",
" print(str(len(spkt)) + \" spikes in cluster number \" + str(k))\n",
" spkwav = np.asarray([data[(int(t*fs)-winsamps):(int(t*fs)+winsamps)] for t in spkt])\n",
" wav_u = np.mean(spkwav,0)\n",
" wav_std = np.std(spkwav,0)\n",
" # fig.add_trace(go.Scatter(x = x, y = wav_u,line_color=pal[k],name='cluster ' + str(k)),\n",
" # row=1,col=1)\n",
" ax.plot(x,wav_u,linewidth = 3,label='cluster '+ str(k),color=pal[k])\n",
" ax.fill_between(x, wav_u-wav_std, wav_u+wav_std, alpha = 0.25,color=pal[k])\n",
"# fig.update_layout(xaxis_title=\"time(seconds)\", yaxis_title='amplitude',width=500, height=500)\n",
"plt.legend(bbox_to_anchor=[1.25,1]);\n",
"\n",
"# fig = go.Figure()\n",
"# fig.add_trace(go.Scatter(x = xtime, y = samples,line_color='black',name='emg0'))\n",
"# for i,k in enumerate(np.unique(df_props['cluster'])):\n",
"# df_ = df_props[df_props['cluster']==k]\n",
"# fig.add_trace(go.Scatter(x = df_['peaks_t'], y = polarity*df_['height'],line_color=pal[k],name=str(k),mode='markers'))\n",
"# fig.update_layout(xaxis_title=\"time(seconds)\", yaxis_title='amplitude',width=800, height=400)\n",
"\n",
"print('Tasks completed at ' + str(datetime.now(timezone(-timedelta(hours=5)))))\n"
]
},
{
"cell_type": "markdown",
"id": "a48353ef-4d84-45a6-91eb-49669622071b",
"metadata": {},
"source": [
"\n",
"# Part IV. Clusters and instantaneous rate"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d095793a-0fe1-4e95-9375-84a6bca8c2f2",
"metadata": {
"cellView": "form",
"id": "aa44e039-606d-4513-bbd5-568034dcf981",
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title { display-mode: \"form\" }\n",
"\n",
"#@markdown TASK: If you are satisfied with your clustering, { display-mode: \"form\" }\n",
"#@markdown RUN this cell to plot: \n",
"\n",
"#@markdown 1) a scatter of event times overlaid on the raw emg signal \n",
"\n",
"#@markdown 2) a line plot of instantaneous spike rate for each cluster. \n",
"\n",
"fig = make_subplots(rows=2, cols=1,\n",
" shared_xaxes=True,\n",
" vertical_spacing=0.02)\n",
"\n",
"fig.add_trace(go.Scatter(x = time[int(start_time*fs):int(stop_time*fs)],\n",
" y = data[int(start_time*fs):int(stop_time*fs)],\n",
" line_color='black',name='raw emg'),\n",
" row=1,col=1)\n",
"for k in np.unique(df_props['cluster']):\n",
" df_ = df_props[df_props['cluster']==k]\n",
" \n",
" fig.add_trace(go.Scatter(x = df_['spikeT'], y = polarity*df_['height'],\n",
" line_color=pal[k],name=str(k) + ' times',mode='markers'),row=1,col=1)\n",
" # fig.add_trace(go.Scatter(x = df_['peaks_t'], y = polarity*df_['height'],\n",
" # line_color=pal[k],name=str(k),mode='markers'),row=1,col=1)\n",
"\n",
"\n",
"for k in np.unique(df_props['cluster']):\n",
" df_ = df_props[df_props['cluster']==k]\n",
" fig.add_trace(go.Scatter(x = df_['spikeT'][1:], y = 1/np.diff(df_['spikeT']),\n",
" line_color=pal[k],name='cluster ' + str(k) + ' rate',mode='markers'),\n",
" row=2,col=1)\n",
" # fig.add_trace(go.Scatter(x = df_['peaks_t'][1:], y = 1/np.diff(df_['peaks_t']),\n",
" # line_color=pal[k],name='cluster ' + str(k) + ' rate',mode='markers'),\n",
" # row=2,col=1)\n",
"\n",
"print('Tasks completed at ' + str(datetime.now(timezone(-timedelta(hours=5)))))\n",
"print('Now wait a moment while the plots render.')\n",
"\n",
"fig.update_layout(xaxis2_title=\"time(seconds)\", \n",
" yaxis_title='amplitude (volts)', yaxis2_title='instantaneous spike rate',\n",
" width=800, height=500)\n",
"\n",
"\n"
]
},
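{
"cell_type": "markdown",
"id": "inst-rate-note",
"metadata": {},
"source": [
"For reference, here is a minimal sketch of how the instantaneous spike rate plotted above is computed: each spike after the first is assigned the reciprocal of the preceding interspike interval. The spike times below are made-up example values, not from your recording."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "inst-rate-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: instantaneous firing rate from a toy list of spike times (in seconds).\n",
"import numpy as np\n",
"\n",
"toy_spike_times = np.array([0.10, 0.15, 0.22, 0.40]) # hypothetical spike times\n",
"isi = np.diff(toy_spike_times) # interspike intervals: [0.05, 0.07, 0.18]\n",
"inst_rate = 1/isi # rate assigned to the 2nd, 3rd, ... spikes\n",
"print(inst_rate) # -> [20. ~14.3 ~5.6] spikes per second"
]
},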
{
"cell_type": "code",
"execution_count": null,
"id": "561d3ac5-e872-42d1-b14d-d0d8cb073695",
"metadata": {
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title {display-mode:\"form\"}\n",
"\n",
"#@markdown Select a section of your recording to analyze.\n",
"#@markdown For example, you can focus on a baseline period, a stimulus period, or a combination.\n",
"\n",
"start_time = 42 #@param\n",
"\n",
"stop_time = 44 #@param\n",
"\n",
"#@markdown Then run this code cell to assign that segment of data to the variable $$y$$\n",
"\n",
"y = data[int(start_time*fs):int(stop_time*fs)]"
]
},
{
"cell_type": "markdown",
"id": "8303803c-40a6-47ea-aab6-a0464b607b7a",
"metadata": {},
"source": [
"What is the standard deviation of your signal? What is the mean?\n",
"Use the numpy \"std\" and \"mean\" methods to calculate them. \n",
"\n",
"Assign the standard deviation to the variable \"SD\"\n",
"Assign the mean to the variable \"M\"\n",
"\n",
"Note: the signal is stored in a variable called \"y\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dd45cc32-1207-4527-b39e-19ee21b82cdd",
"metadata": {},
"outputs": [],
"source": [
"SD = np.std(y)\n",
"\n",
"M = np.mean(y)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "80e83a83-b0a7-40d6-a388-61d200ddce7d",
"metadata": {
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title {display-mode:\"form\"}\n",
"\n",
"#@markdown Run this cell to plot a voltage histogram for your section of signal\n",
"#@markdown You can zoom in along the y-axis to see the distribution for more \"rare\" voltages.\n",
"\n",
"n,bins = np.histogram(y,\"fd\")\n",
"\n",
"\n",
"fig = go.Figure()\n",
"fig.add_trace(go.Scatter(x = bins[1:], y = n/sum(n)))\n",
"# plt.plot(,)\n",
"fig.add_vline(x=SD, line_width=3, line_dash=\"dash\", line_color=\"green\")\n",
"fig.add_vline(x=-SD, line_width=3, line_dash=\"dash\", line_color=\"green\")\n",
"fig.add_vline(x=M, line_width=3, line_dash=\"dash\", line_color=\"purple\")\n",
"# plt.vlines(SD,0,np.max(n/sum(n)),color = 'purple')\n",
"# plt.vlines(-SD,0,np.max(n/sum(n)),color = 'purple')\n",
"# plt.vlines(M,0,np.max(n/sum(n)),color = 'green')\n",
"# plt.ylim(0,0.001)\n",
"fig.update_layout(xaxis_title=\"voltage\", \n",
" yaxis_title='fraction samples',\n",
" width=600, height=400)"
]
},
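{
"cell_type": "markdown",
"id": "sd-threshold-note",
"metadata": {},
"source": [
"Below is an optional sketch of one way you might use these statistics (an illustration, not a required step in the guided analysis): express a candidate detection threshold as a multiple of the signal's standard deviation and check how many events it would detect in the selected segment. The multiplier in the sketch is an arbitrary example value."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "sd-threshold-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: relate a candidate detection threshold to the signal's standard deviation.\n",
"# k is an arbitrary example multiplier -- compare the result to the threshold you used in Part II.\n",
"from scipy.signal import find_peaks\n",
"\n",
"k = 4 # example multiplier (assumption)\n",
"candidate_threshold = k*SD\n",
"events,_ = find_peaks(polarity*y, height=candidate_threshold, distance=int(0.001*fs))\n",
"print('threshold = %0.4f V; %i events detected in this segment' %(candidate_threshold,len(events)))"
]
},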
{
"cell_type": "markdown",
"id": "ee9a8d05-efee-4523-85b9-0976a1de3dbf",
"metadata": {},
"source": [
"\n",
"# Part V. Trial-Averaged Analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0fa096a1-0709-4e84-88f1-9df1eabdca3b",
"metadata": {
"tags": [
"hide-input"
]
},
"outputs": [],
"source": [
"#@title {display-mode:\"form\"}\n",
"\n",
"#@markdown Determine trial times and a trial duration from your recording\n",
"#@markdown > cluster_id is a list that can contain more than one cluster (the spikes from the clusters will be considered together)\n",
"\n",
"trial_times = [44.32,46.45,49.14]\n",
"cluster_id = [0,1,2]\n",
"trial_dur = 2\n",
"baseline_dur = 1\n",
"bin_width = 0.15\n",
"\n",
"trials = []\n",
"for t in trial_times:\n",
" df_ = df_props[df_props['cluster'].isin(cluster_id)]\n",
" trial_ = df_[\"spikeT\"].values- trial_times[0]\n",
" trials.append(trial_[(trial_> -baseline_dur) & (trial_ \n",
"Written by Dr. Krista Perks for courses taught at Wesleyan University."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "524d19fa-3361-46f5-876b-4f5b9e6126de",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
},
"toc-autonumbering": false,
"toc-showcode": false,
"toc-showmarkdowntxt": false,
"toc-showtags": false,
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"state": {},
"version_major": 2,
"version_minor": 0
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}