I coded an experiment in which participants are presented with a series of visual stimuli (stimulus duration: 100 ms, trial duration: 500 ms). Simultaneously with the onset of each visual stimulus, a sound plays for 100 ms. Some of the visual stimuli are targets, and participants should press the spacebar when they detect one. I want to measure participants' reaction times to the targets, so I store, using event.getKeys, the global time at which the spacebar was pressed, and I also store the global time of each trial's onset so I can compare the two. I do this because my inter-trial interval is short, and a participant may only respond to a target during the following trial.
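In other words, for every trial I have a trial-onset timestamp and (possibly) a keypress timestamp taken from the same global clock, and the reaction time is simply their difference. A minimal sketch of that bookkeeping (the names here are made up for illustration):

def compute_rt(onset_time, press_time):
    # Both timestamps come from the same global clock, so this also works
    # when the press was only collected during the following trial.
    return press_time - onset_time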
The code seems to work when I comment out the sd.play call, but as soon as the sound is played the reaction times look wrong: the keypress is always stored in the trial following the target trial, even though I know I pressed the spacebar during the target trial.
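A quick way to check whether the sd.play() call itself eats time would be to time it (a sketch using the clock and tones from the code below; as far as I understand, sounddevice starts playback in the background, so the call should return almost immediately):

t0 = globalClock.getTime()
sd.play(all_tones[0], fs) # should be non-blocking
t1 = globalClock.getTime()
print('sd.play() returned after %.4f s' % (t1 - t0))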
Has anyone encountered this problem before?
Below is the code for the procedure:
def response_check(key):
    """
    Check whether the spacebar was pressed.

    Keyword arguments:
    key -- list from event.getKeys: either [(keypress, time)] or empty

    Return:
    time -- 'nan' if not pressed, otherwise the timestamp of the press (str)
    pressed -- 1 if pressed, 0 otherwise (int)
    """
    if len(key) > 0 and 'space' in key[0]:
        pressed = 1
        time = key[0][1] # timestamp recorded by event.getKeys
    else:
        pressed = 0
        time = 'nan'
    return str(time), pressed
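# For reference, event.getKeys(keyList=['space'], timeStamped=globalClock)
# returns a list of (key, time) pairs, so response_check sees, for example:
#   response_check([('space', 12.3456)]) -> ('12.3456', 1)
#   response_check([]) -> ('nan', 0)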
for t in range(n_trials): # n_trials is the total number of trials
    show_target_crosses(pauses, t, trial_paradigm[t], hi_targets, low_targets) # show target
    l_trial_start = globalClock.getTime()
    check4esc() # check for escape key
    # Set the stimuli according to condition
    standing = visual.Rect(win=win, name='up_cross_hor', width=(dimentions[1]),
        height=(dimentions[0]), ori=0, pos=(0, 0), lineWidth=1,
        lineColor=colors[all_crosses[trial_paradigm[t]][t]],
        lineColorSpace='rgb', fillColor=colors[all_crosses[trial_paradigm[t]][t]],
        fillColorSpace='rgb', opacity=1, depth=0.0, interpolate=True)
    laying = visual.Rect(win=win, name='up_cross_hor', width=(dimentions[0]),
        height=(dimentions[1]), ori=0, pos=(0, position[all_crosses[trial_paradigm[t]][t]]), lineWidth=1,
        lineColor=colors[all_crosses[trial_paradigm[t]][t]],
        lineColorSpace='rgb', fillColor=colors[all_crosses[trial_paradigm[t]][t]],
        fillColorSpace='rgb', opacity=1, depth=0.0, interpolate=True)
    sd.play(all_sounds[all_paradigms[trial_paradigm[t]][t]], fs) # play sound
    if first_seven[t] == 0:
        if all_responses[trial_paradigm[t]][t] == 0:
            trigger(trig_list[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]], 0.01) # send sound trigger
        elif all_responses[trial_paradigm[t]][t] == 1:
            trigger(trig_list_targets[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]], 0.01) # send target trigger
    core.wait(0.06) # adjust diode to sound delay
    standing.draw() # vertical bar
    laying.draw() # horizontal bar
    whiteOn.draw() # square for the photodiode
    win.flip() # show cross and white square
    core.wait(0.1) # show cross for 100 ms
    win.flip() # turn the visual stimuli off
    core.wait(0.032) # adjust ITI
    l_fp = int(ok_data[0])
    l_block_nr = blocks[t]+1
    l_trial_nr = (range(367)*n_blocks)[t]+1
    l_condition = trial_paradigm[t]
    l_sound = all_sounds_names[all_paradigms[trial_paradigm[t]][t]]
    if first_seven[t] == 0:
        if all_responses[trial_paradigm[t]][t] == 0:
            l_trigger = trig_list[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]] # sound trigger code
        elif all_responses[trial_paradigm[t]][t] == 1:
            l_trigger = trig_list_targets[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]] # target trigger code
    elif first_seven[t] == 1:
        l_trigger = 999
    l_target = all_responses[trial_paradigm[t]][t]
    l_cross_condition = all_crosses[trial_paradigm[t]][t]
    key = event.getKeys(keyList = ['space'], timeStamped = globalClock)
    l_response_time = response_check(key)[0]
    # Save data to file
    # 'fp\tblock_nr\ttrial_nr\tcondition\tsound\ttrigger\ttarget\tcross_cond\ttrial_start\tresponse_time\n'
    dataFile.write('%i\t%i\t%i\t%i\t%s\t%i\t%i\t%i\t%f\t%s\n' %(
        l_fp, l_block_nr, l_trial_nr, l_condition, l_sound, l_trigger,
        l_target, l_cross_condition, l_trial_start, l_response_time))
    paus(t, pauses, blocks, trig = 192) # check for pauses
=========== EDIT ============

Below I paste an MCVE version of the whole experiment:
from psychopy import visual
from psychopy import core, gui, data, event, parallel
import sounddevice as sd
import time, random, math, sys
import numpy as np
# Functions --------------------------------------------------------------------
def response_check(key):
    """
    Check whether the spacebar was pressed.

    Keyword arguments:
    key -- list from event.getKeys: either [(keypress, time)] or empty

    Return:
    time -- 'nan' if not pressed, otherwise the timestamp of the press (str)
    pressed -- 1 if pressed, 0 otherwise (int)
    """
    if len(key) > 0 and 'space' in key[0]:
        pressed = 1
        time = key[0][1] # timestamp recorded by event.getKeys
    else:
        pressed = 0
        time = 'nan'
    return str(time), pressed
def create_sinusoid(freq = 1000, phase = 0, fs = 48000, dur = 1):
    '''Create a sinusoid of specified length with amplitude -1 to 1. Use
    set_gain() and fade() to set the amplitude and fade-in/out.

    Keyword arguments:
    freq -- frequency in Hz (float)
    phase -- phase in radians (float)
    fs -- sampling frequency (int)
    dur -- duration of the signal in seconds (float)

    Return:
    sinusoid -- mono signal of the sinusoid (1xn numpy array)
    '''
    t = np.arange(0, dur, 1.0/fs) # time vector
    sinusoid = np.sin(phase + 2*np.pi*freq*t) # sinusoid (mono signal)
    return sinusoid
def fade(monosignal, samples):
    '''Apply a raised-cosine fade to the start and end of a mono signal.

    Keyword arguments:
    monosignal -- vector (1xn numpy array).
    samples -- number of samples of the fade (int). Make sure that:
    2*samples < len(monosignal)

    Return:
    out -- faded monosignal (1xn numpy array)
    '''
    ramps = 0.5*(1-np.cos(2*np.pi*(np.arange(2*samples))/(2*samples-1)))
    fadein = ramps[0:samples]
    fadeout = ramps[samples:]
    plateau = np.ones(len(monosignal)-2*samples)
    weight = np.concatenate((fadein, plateau, fadeout))
    out = weight*monosignal
    return out
def set_gain(mono, gaindb):
    '''Set the gain of a mono signal so that its dB(rms) matches gaindb.

    Keyword arguments:
    mono -- vector (numpy array).
    gaindb -- gain of mono in dB re max = 0 dB (float).

    Return:
    gained -- mono signal (numpy array)
    '''
    rms = np.sqrt(np.mean(mono**2))
    adjust = gaindb - 20 * np.log10(rms)
    gained = 10**(adjust/20.0) * mono # 20.0 keeps the division floating-point
    # Print a warning on overload, that is, if any abs(sample value) > 1
    if np.max(np.abs(gained)) > 1:
        print("WARNING: set_gain() generated an overloaded signal!")
        print("max(abs(signal)) = " + str(np.max(np.abs(gained))))
        print("number of samples > 1 = " + str(np.sum(1 * (np.abs(gained) > 1))))
    return gained
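# Example (illustrative only, not used below): the three helpers are chained
# like this; a 100 ms, 500 Hz tone at -30 dB(rms) with 10 ms (480-sample)
# ramps at fs = 48000 would be:
example_tone = set_gain(fade(create_sinusoid(freq = 500, fs = 48000, dur = 0.1), 480), -30)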
# Screen
win = visual.Window([800, 600], allowGUI = False, # [1920, 1080]
    monitor = 'testMonitor', units = 'height', color = 'gray')
# ==============================================================================
# TONE ORDER AND RESPONSES ----------------------------------------------------
# Indices below refer to positions in all_tones:
# 0 - 500 Hz (deviant/control)
# 1 - 550 Hz (standard)
# 2 - 605 Hz
# 3 - 666 Hz
# 4 - 732 Hz
# 5 - 805 Hz
# 6 - 886 Hz
# 7 - 974 Hz
tone_order = np.random.choice([0,1,2,3,4,5,6,7], 20, replace = True)
targets = np.random.choice([1,0,0,0,0]*4, 20, replace = False)
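# Note: targets is a permutation of [1,0,0,0,0]*4 (replace = False), so every
# run contains exactly four target trials among the 20:
assert sum(targets) == 4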
# ==============================================================================
# CREATE SOUNDS ----------------------------------------------------------------
#sd.default.device = "ASIO Fireface USB"
print('Sound device ------------------------------------------------------------')
print(sd.query_devices()) # device = "ASIO Fireface USB"
print('-------------------------------------------------------------------------')
# Set the gain and sampling frequency (fs)
gain = -30
fs = 44100
frequencies = [500, 550, 605, 666, 732, 805, 886, 974]
tones = [0]*8
for t in range(len(frequencies)):
    # 100 ms tone with a 10 ms (441-sample) fade in/out
    tones[t] = set_gain(fade(create_sinusoid(
        freq = frequencies[t], phase = 0, fs = fs, dur = 0.1), 441), gain)
f_500 = np.transpose(np.array([tones[0],tones[0]])) # deviant, control
f_550 = np.transpose(np.array([tones[1],tones[1]])) # standard
f_605 = np.transpose(np.array([tones[2],tones[2]]))
f_666 = np.transpose(np.array([tones[3],tones[3]]))
f_732 = np.transpose(np.array([tones[4],tones[4]]))
f_805 = np.transpose(np.array([tones[5],tones[5]]))
f_886 = np.transpose(np.array([tones[6],tones[6]]))
f_974 = np.transpose(np.array([tones[7],tones[7]]))
all_tones = [f_500, f_550, f_605, f_666, f_732, f_805, f_886, f_974]
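# Sanity check: sounddevice expects a frames x channels array; each tone here
# should be about 0.1 s * 44100 Hz = 4410 frames by 2 channels.
print(f_500.shape)
assert f_500.shape[1] == 2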
# ==============================================================================
# CREATE VISUALS ---------------------------------------------------------------
stimulus = visual.TextStim(
    win, color = 'white', height = 0.03, pos = (0, 0), text = '')
# ==============================================================================
# Make a text file to save data ------------------------------------------------
fileName = 'test'
dataFile = open(fileName+'.txt', 'w')
dataFile.write('soundCond\ttarget\ttrial_start\tresponse_time\n')
# ==============================================================================
# Keep track of time -----------------------------------------------------------
globalClock = core.Clock()
respClock = core.Clock()
# ==============================================================================
# Experimental procedure -------------------------------------------------------
# Trial loop
for t in range(len(tone_order)):
    l_trial_start = globalClock.getTime()
    # Set the visual stimulus according to condition
    if targets[t] == 0:
        stimulus.text = '+'
    else:
        stimulus.text = 'o'
    sd.play(all_tones[tone_order[t]], fs) # play sound for the current trial
    core.wait(0.08) # adjust visual to sound delay
    stimulus.draw() # cross or target circle
    win.flip() # show the stimulus
    core.wait(0.1) # show it for 100 ms
    win.flip() # turn the visual stimulus off
    core.wait(0.26) # adjust ITI
    l_sound = tone_order[t]
    l_target = targets[t]
    key = event.getKeys(keyList = ['space'], timeStamped = globalClock)
    l_response_time = response_check(key)[0]
    # Save data to file
    # 'soundCond\ttarget\ttrial_start\tresponse_time\n'
    dataFile.write('%i\t%i\t%f\t%s\n' %(
        l_sound, l_target, l_trial_start, l_response_time))
dataFile.close()
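For completeness, one alternative to the single event.getKeys() call per trial would be to poll for the spacebar during the waits, so that a press cannot slip into the following trial (an untested sketch, not part of the script above):

def wait_and_poll(duration):
    # Drop-in replacement for core.wait() that collects timestamped
    # spacebar presses while the trial is still running.
    presses = []
    timer = core.CountdownTimer(duration)
    while timer.getTime() > 0:
        presses += event.getKeys(keyList = ['space'], timeStamped = globalClock)
    return presses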