3

I am trying to capture a timed screenshot during my psychopy task. I have a fixation cross, followed by 2 faces on the left and right side of the screen, and then a dot. I just want a screenshot of the 1 second time period that the two faces appear on the screen. There are 10 different face pairs in the routine and the routine is looped 3 times. Ideally, I would like to have 30 images saved to my computer through this code. I have my code so far below:

from __future__ import division  # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import *  # things like STARTED, FINISHED
import numpy as np  # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg,    linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os  # handy system and path functions

import socket
import time

# --- Session info -------------------------------------------------------
# Ask for participant/session up front; abort cleanly on Cancel.
expName = 'DotProbe_EyeTracker_BSchool'
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if not dlg.OK:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # simple timestamp for the filenames
expInfo['expName'] = expName

# --- Output files -------------------------------------------------------
# Data land in ./data/<participant>_<date>.*
if not os.path.isdir('data'):
    os.makedirs('data')  # if this fails (e.g. permissions) we will get error
filename = os.path.join('data', '%s_%s' % (expInfo['participant'], expInfo['date']))
logFile = logging.LogFile(filename + '.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # console gets warnings only, not the EXP log

# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(
    name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None, originPath=None,
    savePickle=True, saveWideText=True,
    dataFileName=filename)

# Start Code - component code to be run before the window creation

# --- Window & timing ----------------------------------------------------
# Full-screen window on screen 0; [-1,-1,-1] is black in PsychoPy's
# 'rgb' colour space (channels run -1..1).
win = visual.Window(
    size=(1366, 768), fullscr=True, screen=0, allowGUI=False,
    allowStencil=False, monitor='testMonitor', color=[-1, -1, -1],
    colorSpace='rgb')
myClock = core.Clock()

# Store the monitor frame rate if it can be measured successfully.
measured_rate = win.getActualFrameRate()
expInfo['frameRate'] = measured_rate
if measured_rate is None:
    frameDur = 1.0 / 60.0  # couldn't get a reliable measure, so guess 60 Hz
else:
    frameDur = 1.0 / round(measured_rate)

# Initialize components for Routine "instructions"
instructionsClock = core.Clock()
# FIX: corrected the typo "EIther" -> "Either" and the run of stray
# spaces in the on-screen instruction text.
text = visual.TextStim(win=win, ori=0, name='text',
    text='Respond to the probe once it appears. Either click "2" when probe replaces left face or click "3" when probe replaces right face.',
    font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)

# Initialize components for Routine "block1"
block1Clock = core.Clock()
# White '+' at screen centre, shown during the fixation period.
fixation = visual.TextStim(win=win, ori=0, name='fixation',
    text='+',    font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)

# Face images for the dot-probe pair. image='sin' is a placeholder
# texture — presumably the real face file is assigned per trial from the
# conditions list (TODO confirm; not visible in this chunk).
# NOTE(review): pos=[0,0] puts both images at centre; the left/right
# positions are presumably also set per trial — verify against the loop.
leftimage = visual.ImageStim(win=win, name='leftimage',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[1, 1.34],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-1.0)

rightimage = visual.ImageStim(win=win, name='rightimage',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[1, 1.34],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-2.0)

# The dot/probe that replaces one of the faces; smaller than the faces.
probe = visual.ImageStim(win=win, name='probe',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[0.5, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-3.0)

# Get and save a "screen shot" of everything in stimlist.
# NOTE(review): this runs once at script-initialisation time, before any
# trial has assigned real face images — so it captures the placeholder
# stimuli, not the 30 per-trial face pairs the experiment wants. The
# capture presumably needs to move inside the trial loop instead; see
# the win.getMovieFrame()/win.saveMovieFrames() approach — TODO confirm.
stimlist = [leftimage, rightimage]
t0 = myClock.getTime()
rect=(-1,1,1,-1)
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
    # rect is the screen rectangle to grab, (-1,1,1,-1) is whole screen
    # as a list of the edges: Left Top Right Bottom, in norm units.

# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine
user3849871
  • 33
  • 1
  • 3
  • 2
    Looks like you want to use [`Window.getMovieFrame()`](http://psychopy.org/api/visual/window.html#psychopy.visual.Window.getMovieFrame) and `Window.saveMovieFrames()` – Marius Aug 10 '14 at 23:36
  • 1
    @Marius is correct. Perhaps he should add that as an answer rather than a comment? – Michael MacAskill Aug 10 '14 at 23:59

3 Answers

5

Use win.getMovieFrame and win.saveMovieFrames as others have suggested. You don't need the visual.BufferImageStim. You'd likely end up with a loop over conditions when you finish your script. I'd take the screenshots as the actual experiment is running rather than "simulating" beforehand. It ensures that your screenshots are an accurate depiction of what's actually going on during the experiment — and it will also reveal any mistakes where things were drawn incorrectly :-) Of course, if the purpose of the screenshots is purely documentation, remove or comment out those lines when you run the actual experiment to improve performance.

# Loop through trials. You may organize them using ``data.TrialHandler``
# or generate them yourself.
for trial_no, trial in enumerate(myTrialList):
    # Draw whatever you need, probably dependent on the condition. E.g.:
    if trial['condition'] == 'right':
        rightimage.draw()
    else:
        leftimage.draw()
    fixation.draw()

    # Show your stimulus
    win.flip()

    # Save a screenshot. Maybe comment these lines out during production.
    win.getMovieFrame()  # defaults to front buffer, i.e. what's on screen now
    # FIX: the condition alone repeats across trials, so files would
    # silently overwrite each other; include a trial counter. Also add a
    # file extension — saveMovieFrames uses it to choose the image format.
    win.saveMovieFrames('screenshot_%03d_%s.png' % (trial_no, trial['condition']))
Timo Kvamme
  • 2,806
  • 1
  • 19
  • 24
Jonas Lindeløv
  • 5,442
  • 6
  • 31
  • 54
4

I can't test this as I don't have PsychoPy set up on my current computer, but using Window.getMovieFrame() and Window.saveMovieFrames() should get you to where you need to be, e.g.:

# NOTE(review): a follow-up comment on this answer reports that this
# does not work as written — BufferImageStim has already consumed the
# back buffer, so getMovieFrame(buffer='back') may grab an empty frame.
# The suggested fix is to draw the stimuli, flip, then call
# getMovieFrame(buffer='front') — TODO confirm against the PsychoPy docs.
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
    # rect is the screen rectangle to grab, (-1,1,1,-1) is whole screen
    # as a list of the edges: Left Top Right Bottom, in norm units.
# screenshot is currently on the 'back' buffer as we haven't flipped yet
win.getMovieFrame(buffer='back')
win.saveMovieFrames('stimuli.png')
Marius
  • 58,213
  • 16
  • 107
  • 105
  • 1
    +1. I've added a seperate answer (the comment got too long) where I expand a bit on this idea with a few twists. – Jonas Lindeløv Aug 11 '14 at 07:27
  • Almost works ;) You should use only the last two lines and set `buffer='front'` (or you can make an additional call to `screenshot.draw` prior to the last two lines, which is probably not wanted here since the screenshot will be drawn on the next `win.flip`) – Mario Reutter Jul 01 '16 at 13:57
1

I've had trouble using all the answers provided that require win.flip() whilst using PsychoPy builder. Below is the solution that worked for me and uses PIL:

from PIL import ImageGrab
import datetime  # FIX: was missing — datetime.datetime.now() below would raise NameError
import os

# One folder per participant; exist_ok avoids an error on re-runs.
os.makedirs("./data/" + expInfo['participant'], exist_ok=True)
# Timestamp-based filename with filesystem-unsafe characters ('-', ' ',
# '.', ':') replaced by underscores so it is valid on all platforms.
output_image_name = "./data/" + expInfo['participant'] + "/" + str(datetime.datetime.now()).replace("-", "_").replace(" ", "_").replace(".", "_").replace(":", "_") + ".png"
# Grab the whole screen (OS-level, so no win.flip() needed) and save it.
im = ImageGrab.grab()
im.save(output_image_name, 'png')