<?xml version="1.0" encoding="UTF-8"?>
<itemContainer xmlns="http://omeka.org/schemas/omeka-xml/v5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://omeka.org/schemas/omeka-xml/v5 http://omeka.org/schemas/omeka-xml/v5/omeka-xml-5-0.xsd" uri="https://www.johnntowse.com/LUSTRE/items/browse?collection=2&amp;output=omeka-xml" accessDate="2026-05-01T20:22:22+00:00">
  <miscellaneousContainer>
    <pagination>
      <pageNumber>1</pageNumber>
      <perPage>10</perPage>
      <totalResults>7</totalResults>
    </pagination>
  </miscellaneousContainer>
  <item itemId="156" public="1" featured="0">
    <collection collectionId="2">
      <elementSetContainer>
        <elementSet elementSetId="1">
          <name>Dublin Core</name>
          <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
          <elementContainer>
            <element elementId="50">
              <name>Title</name>
              <description>A name given to the resource</description>
              <elementTextContainer>
                <elementText elementTextId="179">
                  <text>Eye tracking </text>
                </elementText>
              </elementTextContainer>
            </element>
            <element elementId="41">
              <name>Description</name>
              <description>An account of the resource</description>
              <elementTextContainer>
                <elementText elementTextId="180">
                  <text>Understanding psychological processes through eye tracking</text>
                </elementText>
              </elementTextContainer>
            </element>
          </elementContainer>
        </elementSet>
      </elementSetContainer>
    </collection>
    <elementSetContainer>
      <elementSet elementSetId="4">
        <name>LUSTRE</name>
        <description>Adds LUSTRE specific project information</description>
        <elementContainer>
          <element elementId="52">
            <name>Supervisor</name>
            <description>Name of the project supervisor</description>
            <elementTextContainer>
              <elementText elementTextId="3205">
                <text>Dr Megan Readman</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="53">
            <name>Project Level</name>
            <description>Project levels should be entered as UG or MSC</description>
            <elementTextContainer>
              <elementText elementTextId="3206">
                <text>MSc</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="54">
            <name>Topic</name>
            <description>Should contain the sub-category of Psychology the project falls under</description>
            <elementTextContainer>
              <elementText elementTextId="3207">
                <text>Neuro-clinical psychology</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="56">
            <name>Sample Size</name>
            <description/>
            <elementTextContainer>
              <elementText elementTextId="3208">
                <text>20</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="55">
            <name>Statistical Analysis Type</name>
            <description>The type of statistical analysis used in the project</description>
            <elementTextContainer>
              <elementText elementTextId="3209">
                <text>T-test and regression</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
    </elementSetContainer>
  </item>
  <item itemId="154" public="1" featured="0">
    <fileContainer>
      <file fileId="172">
        <src>https://www.johnntowse.com/LUSTRE/files/original/3ddc0d86634b8437530ec3352beb2ebc.pdf</src>
        <authentication>1ad80421bc21a8ecbaac8b6704bb657f</authentication>
      </file>
    </fileContainer>
    <collection collectionId="2">
      <elementSetContainer>
        <elementSet elementSetId="1">
          <name>Dublin Core</name>
          <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
          <elementContainer>
            <element elementId="50">
              <name>Title</name>
              <description>A name given to the resource</description>
              <elementTextContainer>
                <elementText elementTextId="179">
                  <text>Eye tracking </text>
                </elementText>
              </elementTextContainer>
            </element>
            <element elementId="41">
              <name>Description</name>
              <description>An account of the resource</description>
              <elementTextContainer>
                <elementText elementTextId="180">
                  <text>Understanding psychological processes through eye tracking</text>
                </elementText>
              </elementTextContainer>
            </element>
          </elementContainer>
        </elementSet>
      </elementSetContainer>
    </collection>
    <elementSetContainer>
      <elementSet elementSetId="1">
        <name>Dublin Core</name>
        <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
        <elementContainer>
          <element elementId="50">
            <name>Title</name>
            <description>A name given to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3165">
                <text>Levodopa and antisaccade performance in Parkinson’s disease: the influence of intrinsic dopaminergic functioning, dopamine agonists and chronic anti-parkinsonian medication </text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="39">
            <name>Creator</name>
            <description>An entity primarily responsible for making the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3166">
                <text>Amy Austin</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="40">
            <name>Date</name>
            <description>A point or period of time associated with an event in the lifecycle of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3167">
                <text>14th September 2022</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="41">
            <name>Description</name>
            <description>An account of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3168">
                <text>The antisaccade (AS) task is a validated eye-tracking paradigm primarily used to assess response inhibition. Although several studies have established AS error rate and latency to be increased in Parkinson’s disease (PD), the evidence regarding the effect of existing anti-parkinsonian medication (e.g., levodopa) on these parameters is contradictory. According to the dopamine overdose hypothesis (DOH), the effect of levodopa on AS performance should be dependent upon the intrinsic dopaminergic functioning of the individual. The current study is the first study to use spontaneous eye blink rate (SEBR), a proxy measure for dopamine activity, to investigate the influence of intrinsic dopaminergic functioning on AS performance following levodopa consumption. The influence of additional PD related factors was also examined. SEBR and AS performance was assessed in eleven healthy controls (HC) and nine participants with PD. SEBR and AS performance was assessed twice in participants with PD, once 30 minutes prior to, and once one hour after, the consumption of levodopa. Pre-levodopa consumption SEBR was a significant positive predictor of AS error rate post, but not pre, levodopa consumption. Total years consuming anti-parkinsonian medications was positively predictive of AS error rate both pre and post levodopa consumption. The regular consumption of dopamine agonists was found to significantly predict fewer AS errors following the consumption of levodopa. The current results support the DOH; higher intrinsic dopaminergic functioning was associated with increased AS errors following the artificial stimulation of dopamine via by levodopa. Therefore, artificial dopaminergic stimulation of an intrinsically sufficiently functioning dopaminergic system appears to produce an overstimulation/overdose effect whereby consequential detrimental effects on AS performance/response inhibition are observed. 
The current findings go some way in explaining the inconsistencies within the literature. </text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="49">
            <name>Subject</name>
            <description>The topic of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3169">
                <text>Keywords: Parkinson’s disease, dopamine overdose hypothesis, spontaneous eye blink rate, levodopa, dopamine agonists, antisaccade </text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="48">
            <name>Source</name>
            <description>A related resource from which the described resource is derived</description>
            <elementTextContainer>
              <elementText elementTextId="3170">
                <text>Twenty-one participants, 10 individuals with mild-moderate idiopathic PD (Mage = 67.10, SDage = 8.63) and 11 healthy control older adults of comparable age (HC; Mage = 66.82, SDage = 9.09) were recruited to the study. The mean age of recruited HC and PD individuals did not differ significantly, t (18.95) = - 0.07, p = .943). Participants were recruited via established research databases and via the social network of the researcher. As the current study focused on PD, participants with a diagnosis of any neurological conditions (beyond PD) were excluded. Additionally, as depression and anxiety influence an individual’s saccadic performance profile and SEBR (Jazbec et al., 2005; Mackintosh et al., 1983), individuals who obtained a clinically moderate depression or anxiety score, as measured by the Hospital Anxiety and Depression scale (HADS), were excluded. Similarly, mild cognitive impairment (MCI) and dementia are associated with increased AS error rate and AS latency (Opwonya et al., 2022), and increased SEBR (D’Antonio et al., 2021). As such, those who presented a cognitive profile indicative of MCI/dementia (score &lt; 82 on the Addenbrookes Cognitive Exam-III, ACE-III; Hsieh et al., 2013) were excluded from the current study. Finally, as experimental stimuli in the current study were coloured red and green, individuals with red-green colour vision deficiency, detected via the Ishihara test (Ishihara, 1917) were also excluded. &#13;
On these grounds of exclusion, one individual with PD was excluded from the current study due to obtaining an ACE-III score indicative of MCI. Subsequently, nine individuals with mild-moderate idiopathic PD (Mage = 65.89, SDage = 8.21) and eleven HC individuals (Mage = 66.82, SDage = 9.09) participated in the study. All participants had normal or corrected to normal vision. &#13;
All participants with PD were classified as Hohen and Yahr stage II or below (Hoehn &amp; Yahr, 1998), indicating they were physically independent and capable of completing all study tasks. At the time of testing, all PD participants were receiving anti-parkinsonian medication (see table 2 for PD sample anti-parkinsonian medication summary). All PD participants were tested under their normal medication regime, that is, participants attended the study 30 minutes prior to the consumption of their next, normally scheduled, dosage of levodopa-based medication. Accordingly, measures were obtained both pre (30 minutes prior) and post (1 hour after) levodopa consumption, permitting the respective investigations of pre and post levodopa consumption SEBR, motor symptom severity, AS performance and PS performance. &#13;
An online calculator computed the levodopa equivalent daily dosages (LEDD) for each participant with PD. LEDD indicates the equivalent amount of levodopa an individual receives from all anti-parkinsonian medications across a 24-hour window (Julien et al., 2021). The online calculator can be accessed via: https://www.parkinsonsmeasurement.org/toolBox/levodopaEquivalentDose.htm &#13;
Materials and measures &#13;
Online questionnaire &#13;
A questionnaire comprised of a demographics and health screening survey, the Edinburgh handedness inventory (EHI), the HADS, and a PD and associated medication &#13;
survey was developed and distributed via Qualtrics (Qualtrics, 2013). The questionnaire required 15 minutes to complete. &#13;
Demographics and health screening survey. Participants were asked to disclose key demographic and health information (e.g., age, sex, whether they had normal or corrected to normal vision). Participants were also asked to disclose any history of visual impairments, neurological conditions (beyond PD), psychiatric illness, or rheumatic illness. &#13;
The EHI (Oldfield, 1971). The EHI is a highly reliable (r = .97, p &lt; .001; Oldfield, 1971) and internally consistent (a = 0.88; Oldfield, 1971) self-report measure of an individual’s hand dominance (Edlin et al., 2015). Participants are requested to indicate their typical hand preference, via five-point Likert scales ranging from ‘always left’- ‘always right’, when completing a range of daily activities (e.g., writing). A final score of ≥ 50 indicates right hand dominance, &lt; 50 to &gt; −50 indicates ambidexterity, and ≤−50 indicates left hand dominance. As hand dominance typically corresponds to ocular dominance (McManus et al., 1999), the EHI was used to infer the dominant eye of each participant in the current study. Monocular eye tacking was then conducted on the dominant eye (Ehinger et al., 2019). &#13;
The HADS (Zigmond &amp; Snaith, 1983). The HADS is a short self-assessment questionnaire validated to detect anxiety and depression within the general population, inclusive of the elderly (Bjelland et al., 2002). Respondents are required to indicate, via four- point Likert scales, how 14 items relate to their recent feelings. Responses range from ‘0’ (the item has little relevance to recent feelings), to ‘4’ (the item is significantly representative of recent feelings). Likert responses are summed separately for anxiety and depression relevant items. Scores of seven or less indicate no notable presence of anxiety and depression. Scores ranging between eight and 10 indicate mild levels, between 11 and 14 indicate moderate levels, and between 15 and 21 indicate severe levels. &#13;
PD and associated medication survey. Individuals with PD were asked to disclose further health information regarding the number of years since their PD diagnosis, which anti-parkinsonian medications they were currently receiving, the daily dosages of these medications and the total number of years they had been consuming anti-parkinsonian medications. &#13;
ACE-III (Hsieh et al., 2013) &#13;
The ACE-III is a well validated (Hseih et al., 2013), highly reliable and internally consistent (ICC = 0.92, a = 0.87 respectively; Takenoshita et al., 2019) cognitive assessment used to screen for the presence of MCI and dementia syndromes (Hsieh et al., 2013). To provide a global neuropsychological evaluation, participants are asked to complete tasks assumed to relate to five principal cognitive functions, namely: memory, language, attention, visuospatial skills, and verbal fluency (Hodges &amp; Larner, 2017). Scores ascertained from each of the five domains are summed and the individual receives an overall score relative to the maximum possible score of 100. Higher scores indicate better cognitive functioning. A score below 82 is indicative of cognitive impairment. &#13;
Ishihara colour deficiency test (Ishihara, 1917) &#13;
The Ishihara colour deficiency test is a 38-item assessment of red-green colour perception. Typical red-green colour vision is marked by the ability to correctly decipher a number or pattern embedded within 38 red/green circular images. The test requires three minutes to complete. &#13;
MDS-UPDRS (Goetz et al., 2008) &#13;
Both motor and non-motor PD symptoms were evaluated using the MDS-UPDRS. The MDS-UPDRS is comprised of four distinct subscales. Subscale I focuses on non-motor symptoms associated with PD (e.g., cognitive impairment, dopamine dysregulation syndrome), whereas subscales II – IV focus on the motor symptoms associated with PD. Subscales I, II and IV require participants to retrospectively respond with answers reflecting their average symptoms/experiences over the previous week. Whereas subscale III directly assesses current functioning via a motor exam. The motor examination requires participants to perform a series of motor tasks (e.g., finger tapping, walking, arising from a chair) under the observation of the examiner. The examiner rates the severity of motor impairment displayed during each motor task performed. All subscales of the MDS-UPDRS are scored according to four-point-Likert scales whereby ‘0’ indicates no impairment and ‘4’ indicates the most severe impairment. Hoehn and Yahr (Hoehn &amp; Yahr, 1998) stages were calculated based upon the MDS-UPDRS assessment. The accumulative score of subscales I, II, III and IV provide an overall MDS-UPDRS score indicative of PD severity. A maximum score of 199 is reflective of the most severe disability the result of PD (Holden et al., 2018). The MDS-UPDRS requires approximately 30 minutes to complete. &#13;
SEBR &#13;
SEBR was assessed by recording participant’s eye movements whilst sitting at rest. The recording device was located approximately 55cm directly in front of the participant. Participants were not informed that they were completing an assessment of their blink rate, nor were they engaged into conversation with the examiner as both informing participants that their blink rate is being assessed and conversing increase SEBR (Doughty, 2001). Participants eye movements were recorded for two-and-a-half minutes however, only the last one minute of each recording was coded for SEBR (one minute is sufficiently long enough to obtain a representative blink rate, Deuschl &amp; Goddemeier, 1998). A blink was identified (and coded accordingly) as full eye lid closure which was the result of bilateral movement of the eyelids (Kimber &amp; Thompson, 2000). SEBR was scored as the number of blinks per minute. PD participant pre-levodopa consumption SEBR was considered their baseline SEBR, reflective of intrinsic dopaminergic functioning (Kimber &amp; Thompson, 2000). &#13;
Eye tracking tasks &#13;
Apparatus &#13;
A desktop mounted eye tracker (Eyelink Desktop 1000), operating in monocular mode, with a sampling rate of 500 Hz was used to record eye movements of the participant’s dominant eye. An adjustable chin rest with attached forehead rest was utilized to minimise head movements. The eye tracking camera was located at the base of the stimuli presenting computer monitor. Participants sat approximately 55cm away from the eye tracking camera and computer monitor. A 4-point calibration, whereby participants are asked to fixate upon a red circle as it moves from the top, bottom, right and left side of the computer screen, was used prior to the commencement of all eye tracking tasks. Frequent calibration improves the accuracy of eye-tracking data (Pi &amp; Shi, 2019). All eye tracking tasks were developed and operated using experiment builder software version 1.10.1630. Habitual eye glass wearers were not required to remove their eyeglasses during eye tracking tasks. Eye tracking tasks required approximately 10 minutes to complete. &#13;
Prosaccade task &#13;
Participants completed four practice trials and 16 experimental gap trials. To centre a participant’s gaze at the start of each trial, a white fixation stimulus was presented for 1000 milliseconds (ms) in the centre of a back computer screen. A red lateralised target was then displayed randomly either to the right or the left of the central fixation for 1200ms at 4 ° eccentricity. The PS task operated according to the gap paradigm. Accordingly, to create a temporal gap between fixation and target stimuli, a black interval screen was presented for 200ms between the extinguishing of the white fixation stimulus and the presentation of the red target stimulus. For the PS task, participants were instructed to shift their visual focus towards the location of the red target as quickly and as accurately as possible. &#13;
Antisaccade task &#13;
Participants completed four practice trials followed by 24 experimental gap trials. Participants were presented with a white central fixation stimulus on a black computer screen for 1000ms. Following a 200ms black interval screen, a green lateralised target stimulus was presented at random to either the left or right of the central fixation. The green target was displayed for 2000ms at 4 ° eccentricity. Participants were instructed to shift their visual focus to the opposite direction of where the green target stimulus appeared. An example of a successful trial would be as follows, if the green target stimulus was presented left-lateralised, participants should direct their gaze to the right side of the computer screen. &#13;
Procedure &#13;
The present study was reviewed and approved by Lancaster University’s ethics committee. All participants provided informed consent prior to participating. &#13;
Participants were tested on one day and testing sessions took no longer than two hours. Individuals with PD completed SEBR assessments, MDS-UPDRS III motor examinations and all eye tracking tasks twice, once 30 minutes prior to consuming their usually scheduled dosage of levodopa medication, and once again one hour following the consumption of their levodopa medication. Prior research indicates that one hour is sufficient for levodopa to be metabolized and produce therapeutic effects (Lu et al., 2019). This method of testing the effect of anti-parkinsonian medications is widely used within the literature and no detrimental effects of this method have been reported (Cools et al., 2003). Similarly, re- test on the PS and AS tasks does not significantly influence performance (Larrison-Faucher et al., 2004). HC participants completed all study tasks once. &#13;
All participants completed the online questionnaire 48 hours prior to attending testing sessions. Upon arriving to testing, all participants completed an assessment of SEBR followed by the PS and the AS tasks. HC participants then completed the ACE-III and the Ishihara test. HC participation in the study was then complete. PD participants continued with further testing. Specifically, PD participants then completed the MDS-UPDRS subscale III motor examination. PD participants then consumed their usual dose of levodopa medication at their usual time. During the one-hour levodopa metabolization period, participants with PD completed subscales I, II and IV of the MDS-UPDRS, the ACE-III and the Ishihara test. &#13;
Once one hour had elapsed, individuals with PD then re-completed an assessment of SEBR, the PS and the AS task, and were also re-assessed via the MDS-UPDRS subscale III motor examination. Thus, motor symptom severity (MDS-UPDRSIII), SEBR and eye- tracking data were obtained from both pre (baseline) and post levodopa consumption medication states. &#13;
Data processing &#13;
Raw data were extracted via EyeLink using DataViewer Software Version 3.2 and processed offline using the bespoke software SaccadeMachine (Mardanbegi et al., 2019). SaccadeMachine removes noise and spikes within the data; frames with a velocity signal greater than 1500 deg/s or with an acceleration signal greater than 100,000deg2/sec are filtered out. Fixations and saccadic events were detected via the EyeLink Parser. Trials were excluded where participants failed to direct their gaze to the central fixation stimulus. To ensure saccadic data were reflective of responses to target presentation, a temporal window of 80-700ms from the initial onset of the target stimulus was used (i.e., anticipatory saccades produced prior to 80ms, and excessively delayed saccades produced after 700ms were excluded). The following variables were extracted from the processed data: PS latency (the time taken between the onset of the target stimulus and the first correct fixation), PS error rate (the number of times the participant failed to generate a reflexive saccade to fixate upon the target stimulus), AS latency (the time taken between the onset of the target stimulus and the first correct fixation in the opposite direction to the target stimulus), AS error rate (the number of times a participant erroneously performed a reflexive PS towards the novel target stimulus instead of looking away). </text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="45">
            <name>Publisher</name>
            <description>An entity responsible for making the resource available</description>
            <elementTextContainer>
              <elementText elementTextId="3171">
                <text>Lancaster University</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="42">
            <name>Format</name>
            <description>The file format, physical medium, or dimensions of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3172">
                <text>Data/R.csv</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="43">
            <name>Identifier</name>
            <description>An unambiguous reference to the resource within a given context</description>
            <elementTextContainer>
              <elementText elementTextId="3173">
                <text>Austin 2022</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="37">
            <name>Contributor</name>
            <description>An entity responsible for making contributions to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3174">
                <text>Rachel Jordan&#13;
Sian Reid</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="47">
            <name>Rights</name>
            <description>Information about rights held in and over the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3175">
                <text>Open</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="46">
            <name>Relation</name>
            <description>A related resource</description>
            <elementTextContainer>
              <elementText elementTextId="3176">
                <text>N/A</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="44">
            <name>Language</name>
            <description>A language of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3177">
                <text>English</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="51">
            <name>Type</name>
            <description>The nature or genre of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3178">
                <text>Data</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="38">
            <name>Coverage</name>
            <description>The spatial or temporal topic of the resource, the spatial applicability of the resource, or the jurisdiction under which the resource is relevant</description>
            <elementTextContainer>
              <elementText elementTextId="3179">
                <text>LA1 4YF</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
      <elementSet elementSetId="4">
        <name>LUSTRE</name>
        <description>Adds LUSTRE specific project information</description>
        <elementContainer>
          <element elementId="52">
            <name>Supervisor</name>
            <description>Name of the project supervisor</description>
            <elementTextContainer>
              <elementText elementTextId="3403">
                <text>Dr Megan Readman</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="53">
            <name>Project Level</name>
            <description>Project levels should be entered as UG or MSC</description>
            <elementTextContainer>
              <elementText elementTextId="3404">
                <text>MSc</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="54">
            <name>Topic</name>
            <description>Should contain the sub-category of Psychology the project falls under</description>
            <elementTextContainer>
              <elementText elementTextId="3405">
                <text>Neuro-clinical psychology</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="56">
            <name>Sample Size</name>
            <description/>
            <elementTextContainer>
              <elementText elementTextId="3406">
                <text>20 (9 individuals with mild-moderate Parkinson's disease, 11 healthy control individuals of similar age)</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="55">
            <name>Statistical Analysis Type</name>
            <description>The type of statistical analysis used in the project</description>
            <elementTextContainer>
              <elementText elementTextId="3407">
                <text>Regression, T-Test</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
    </elementSetContainer>
  </item>
  <item itemId="151" public="1" featured="0">
    <fileContainer>
      <file fileId="150">
        <src>https://www.johnntowse.com/LUSTRE/files/original/54ff2b32ca6ddc076571e720c7f80444.pdf</src>
        <authentication>1c7c86c045532986fdad17219d9d6e82</authentication>
      </file>
      <file fileId="151">
        <src>https://www.johnntowse.com/LUSTRE/files/original/6ee62233e0839f9c2766d58b4b93b348.pdf</src>
        <authentication>1c7c86c045532986fdad17219d9d6e82</authentication>
      </file>
      <file fileId="152">
        <src>https://www.johnntowse.com/LUSTRE/files/original/6bb01a175bd17e9527b8e3c400460fb2.pdf</src>
        <authentication>1c7c86c045532986fdad17219d9d6e82</authentication>
      </file>
    </fileContainer>
    <collection collectionId="2">
      <elementSetContainer>
        <elementSet elementSetId="1">
          <name>Dublin Core</name>
          <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
          <elementContainer>
            <element elementId="50">
              <name>Title</name>
              <description>A name given to the resource</description>
              <elementTextContainer>
                <elementText elementTextId="179">
                  <text>Eye tracking </text>
                </elementText>
              </elementTextContainer>
            </element>
            <element elementId="41">
              <name>Description</name>
              <description>An account of the resource</description>
              <elementTextContainer>
                <elementText elementTextId="180">
                  <text>Understanding psychological processes through eye tracking</text>
                </elementText>
              </elementTextContainer>
            </element>
          </elementContainer>
        </elementSet>
      </elementSetContainer>
    </collection>
    <elementSetContainer>
      <elementSet elementSetId="1">
        <name>Dublin Core</name>
        <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
        <elementContainer>
          <element elementId="50">
            <name>Title</name>
            <description>A name given to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3115">
                <text>Eye tracking and Attention Deficit Hyperactivity Disorder (ADHD): Can eye tracking identify the feigning of ADHD?</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="39">
            <name>Creator</name>
            <description>An entity primarily responsible for making the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3116">
                <text>Reva Maria George </text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="40">
            <name>Date</name>
            <description>A point or period of time associated with an event in the lifecycle of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3117">
                <text>2022-09-07</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="41">
            <name>Description</name>
            <description>An account of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3118">
                <text>When diagnosing adult ADHD, it has proven difficult for clinicians to detect deceptive behaviour. Diagnosis of ADHD comes with economic, academic, and recreational benefits, which may account for the increasing feigning of the disorder. Current diagnostic methods: clinical interviews and self-report scales can be easily manipulated for a positive diagnosis. Hence the present study evaluated the utility of eye tracking devices to detect the feigning of ADHD. Eye movements of 38 participants (7 ADHD, 15 healthy controls, and 16 healthy feigners) were captured throughout the prosaccade and anti-saccade task. The performance of the participants on the task was evaluated in terms of latency and the percentage of error rate. The findings of the study reveal a significant difference in the latency of anti-saccade tasks i.e., feigners have an increased latency compared to healthy controls and ADHD participants. Because of the limited sample size, study findings cannot be generalized. Further investigations are needed with a much larger sample.</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="49">
            <name>Subject</name>
            <description>The topic of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3119">
                <text>Eye-tracking, ADHD, Feigning, Prosaccade task, Anti-saccade task, latency, error rate, eye movements</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="48">
            <name>Source</name>
            <description>A related resource from which the described resource is derived</description>
            <elementTextContainer>
              <elementText elementTextId="3120">
                <text>Method&#13;
Participants &#13;
 Previous studies explaining feigning in ADHD acquired data from around 90-100 samples (Booksh et.al., 2010; Frazier et.al., 2008; Harrison et.al., 2007). The study therefore aimed to recruit 90 participants, 30 each in ADHD, healthy controls, and healthy feigners faking the disorder. Participants with and without a clinical diagnosis of ADHD were selected using the opportunity sampling method. A total of 42 participants between the age of 18-35 volunteered and were recruited for the study through the university disability service (11%), posters (16%) and through word of mouth (73%). Data of two participants were removed as the eye tracker repeatedly lost the pupil during recording. All participants were rewarded with an equal chance to win one of 6 £25 vouchers. Thirty-one of the 42 participants were healthy younger adult controls. Of the healthy control participants 15 (7 females; Mage = 24.33; SDage=4.32) participated as healthy controls, and the remaining 16 (9 females; Mage = 24.25; SDage=1.88) as healthy feigners. Seven ADHD participants (6 females) with a mean age 22.71 (SD=2.22) completed the study. The severity of the ADHD symptoms was analysed using the Adult ADHD self-report scale (for more demographic details see Table 1). The exclusion criteria include participants: 1) with any visual (other than corrected-to-normal vision) impairment 2) with any cognitive impairment 3) with additional diagnosis of neurological conditions 4) without a proper clinical diagnosis of ADHD. The exclusion criteria were applied because these impairments may interfere with the participants performance in the task.  &#13;
Prior to data analysis, one of the participants was removed from ADHD group due to the lack of proper clinical diagnosis. Furthermore, a control participant was excluded with the assumption of having a probable mild cognitive impairment because the individual scored less than 82 (cut-off) in the Addenbrooke’s Cognitive Examination-III (ACE-III) (see Table 1 for further demographic details). &#13;
Stimuli and Apparatus &#13;
Addenbrooke’s Cognitive Examination-III (ACE-III) &#13;
The ACE-III, developed by Hodges et.al, is an extended cognitive screening technique. The items of the test produce 5 sub-scores totalling 100, with each sub-score corresponding to a different cognitive domain, such as attention (18 points), memory (26 points), verbal fluency (14 points), language (26 points), and visuospatial skills (16 points) (Noone, 2015). Higher scores indicate superior cognitive functioning within the given domain. The validated cut-off point for normal cognitive functioning is 82/100, therefore individuals who yield a total score of &lt; 82 are assumed to have probable mild cognitive impairment. The ACE-III has proven reliability (α= 0.88), sensitivity (0.93), specificity (1.0) and concurrent validity with alternative cognitive assessments such as the ACE-R (r= 0.99, p &lt; 0.01; Hsieh, 2013).  &#13;
Ishihara Colour blindness test &#13;
Ishihara colour blindness developed by Dr Shinobu Ishihara, was used to assess the colour vision deficiency of congenital origin, particularly red-green deficiency (Ishihara, 2011). It consists of 24 coloured plates containing a circle of dots with random colours and numbers. Each plate includes primary and secondary colour dots, with the primary colours appearing in patterns or numbers, while secondary colours appear as the background (Shaygannejad et.al., 2012). Plates 1–15 were utilised because of the fact that the main goal was to separate the colour defects from the normal colour appreciation simply. The participants were instructed to read out the numbers aloud, without more than three seconds' delay. A participant with an error in reading the numbers of two or more plates were considered to be having an impaired colour vision. &#13;
Royal Air Force (RAF) ruler &#13;
The RAF near point rule is a 50cm long square rule with a cheek rest and slider holding a revolving four-sided cube. One of the 4 sides has a vertical line with a central dot for convergence fixation. It is used for determining the near point of convergence (NPC) (Sharma, 2017). The participant is instructed to keep a direct gaze on the dot while the slider descends and to report when the dot's image breaks into two. The cut-off point for NPC break and NPC recovery is between 5 and 7 cm respectively (Pang et.al., 2010) &#13;
Adult ADHD Self Report Scale (ASRS-v1.1; Kessler et al., 2005) &#13;
The severity of ADHD symptoms presented by individuals with ADHD was assessed using the ASRS. The ASRS is an 18-item checklist, developed by the World Health Organization (WHO) work group together with the WHO World Mental Health (WMH) Survey Initiative (Kessler et al., 2005), to screen ADHD in adult patients. Completion of the ASRS requires participants to indicate how much they agree that the given statement relates to their behaviour over the past 6 months. The questions are divided into 2 parts: part A and part B. Part A contains 6 questions that are indicative of symptoms consistent with ADHD and are used for screening purposes. A score of 4 or above denotes symptoms typical with ADHD. The final 12 questions in Part B provide a more detailed breakdown of the specific symptoms an individual is presenting. The scale has high concurrent validity, and the internal consistency of the scale Cronbach’s α was found to be 0.88 (Adler et.al., 2006).&#13;
Hospital Anxiety and Depression Scale (HADS) &#13;
Hospital Anxiety and Depression Scale was developed by Zigmond and Snaith in 1983. It is a 14-item measure, used to detect the psychological distress of the participants (Zigmond &amp; Snaith, 1983). Seven of the items measure anxiety (HADS-A), while the remaining seven measure the depressive symptoms (HADS-D). For each item, the participant is asked to indicate on a four-point scale the degree to which they feel a given statement relates to how they were feeling for the past week. The overall score for both anxiety and depression is 21. A score of 0-7 represents “normal”, 8-10 indicates “mild”, 11-14 “moderate” and 15-21 indicates “severe” (Pais-Ribeiro et al., 2018). The scale is reliable and valid in measuring symptoms in both general and psychiatric patients (Bjelland et.al., 2002).  &#13;
&#13;
Eye-Tracking Measurement &#13;
Participants eye movements were recorded via the EyeLink Desktop 1000 at 500Hz. To minimize the head movements, a chin rest was used. Participants were seated approximately 55cm from the computer monitor (monitor run at 60 Hz). All the stimuli used for the study were created and controlled using Experiment Builder Software Version 1.10.1630. Two different computers are used for the eye-tracking system: a host PC which tracks the eye movements and determines their actual gaze positions and a display computer which shows the stimuli during the calibration and experimental trial.  &#13;
Calibration  &#13;
Prior to presenting the experimental stimuli participants completed a 4-point calibration to ensure the eye tracker was accurately tracking their eyes. During this trial, the participant will be asked to follow a red dot that will move to the four edges of a +.  &#13;
Prosaccade task &#13;
Participants were asked to complete 16 gap trials as quickly and accurately as possible. At first the participants were instructed to look at a fixation point to centre their gaze. It was a white target displayed at the centre of the screen for 1000ms. Then they were told to focus on the appearing red lateralised target, presented randomly to the left or right of the screen at 4° (visual angle) for 1200ms. The temporal gap in stimuli presentation is due to a 200ms blank interval screen which was displayed between the fading of the white fixation stimuli and the initial appearance of the red target.  &#13;
Anti-saccade task &#13;
For anti-saccade task, the participants completed 24 gap trials with 4 practice trials. They were asked to look at the central white fixation presented for 1000ms before shifting their gaze and attentional focus to the opposite side of the screen from where the green target appeared. The green lateralised target was displayed randomly to the left or right side of the screen at 4° (visual angle) for 2000ms. There was a 200ms blank interval screen as a gap in between the fixation point and the target. &#13;
 Procedure &#13;
The study was approved by the Lancaster University Psychology Department Ethics Committee. Prior to study commencement healthy younger adult volunteers were randomly assigned to either the healthy control or healthy feigner (asked to feign ADHD) group. All individuals with a formal clinical diagnosis of ADHD were assigned to the ADHD group. &#13;
The participants were required to visit the lab in order to participate. Before commencing the study, the participants provided informed consent. After taking the required demographic data, participants were then screened for the probable presence of mild cognitive impairment using the ACE-III. They were also screened for any visual impairments using the RAF rule and Ishihara colour blindness test. Then, the participants were asked to complete the HADS, to screen for any psychological distress. Additionally, the ADHD participants were asked to complete the ASRS questionnaire, to determine the severity of the disorder. &#13;
 On completion of the pre-study questionnaires, participants were provided with a task information leaflet.  &#13;
At this time control and ADHD participants were presented with a vignette (Appendix B) detailing an individual trying to feign ADHD. Comparatively, those assigned to the feigning condition were presented with a vignette (Appendix C) that explained the symptoms of ADHD and were asked to imagine themselves in a situation where they were to feign ADHD. All participants were then asked to complete the two eye movement tasks and the associated calibration trials. Fundamentally, at this time healthy controls and those with ADHD were asked to complete the tasks honestly to the best of their ability. In comparison, those in the feigning condition were asked to complete these tasks whilst pretending to have ADHD (without any over-exaggeration). On completion of the tasks, all participants were informed that they will be entered into a lottery to win a £25 and were provided with a debrief sheet (Appendix H), which explains the details of the study.  &#13;
Data Analysis &#13;
DataViewer Software Version 3.2 was used to extract and analyse the raw EyeLink data. The data was then analysed online using a bespoke software SaccadeMachine. With the software spikes and noise were removed by filtering out frames with a velocity signal greater than 1,500 deg/s or with an acceleration signal greater than 100,000 deg 2 /sec. Fixations and saccadic events were identified using the EyeLink Parser, and the saccades were extracted alongside multiple temporal and spatial variables. Trials were eliminated when the participant did not direct their gaze on to the central fixation. The temporal window of 80-700ms used and measured from the onset of the target display. Anticipatory saccades made prior to 80ms, and excessively delayed saccades made after 700ms were removed. The data thus formed consists of the latency and error rate. Latency is the time taken of the correct trial whereas the error rate is the percentage of trials the participant got wrong. Data of one individual participant from the control group was removed as their ACE score was low suggesting the probable presence of mild cognitive impairment. Due to the lack of a formal diagnosis, data of an ADHD participant was removed.  &#13;
All data was then assessed to ensure it met the assumptions required for statistical analysis. First, all data was assessed for the presence of any outliers (+/- 2SD). This analysis revealed there were 3 outliers for the both the pro- and anti-saccade measures. Given that these outliers may skew the subsequent analysis, all outliers were removed. The subsequent data was then checked to ensure it met the assumptions of normality. It was found that the prosaccade latency satisfied the normality condition (see Figure 1), hence one-way ANOVA was applied to investigate the difference in latency across the groups. As the data for prosaccade error rate was skewed (see Figure 2), Kruskal-Wallis H Test was used to determine the difference in data across the groups. Removing the outliers gave a data which satisfied normality condition for both anti-saccade latency (see Figure 3) and error rate (see Figure 4). Hence one-way ANOVA was used to test the difference for both the data across the groups and a post hoc Tukey’s Honest Significant Difference test was used to determine the significance of the difference in anti-saccade latency. &#13;
&#13;
</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="45">
            <name>Publisher</name>
            <description>An entity responsible for making the resource available</description>
            <elementTextContainer>
              <elementText elementTextId="3121">
                <text>Lancaster University</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="42">
            <name>Format</name>
            <description>The file format, physical medium, or dimensions of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3122">
                <text>SPSS.sav for results&#13;
Word.doc for demographic and data acquisition form&#13;
PDF for consent form</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="43">
            <name>Identifier</name>
            <description>An unambiguous reference to the resource within a given context</description>
            <elementTextContainer>
              <elementText elementTextId="3123">
                <text>George_2022</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="37">
            <name>Contributor</name>
            <description>An entity responsible for making contributions to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3124">
                <text>Lettie and Delyth</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="46">
            <name>Relation</name>
            <description>A related resource</description>
            <elementTextContainer>
              <elementText elementTextId="3125">
                <text>None</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="44">
            <name>Language</name>
            <description>A language of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3126">
                <text>English</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="51">
            <name>Type</name>
            <description>The nature or genre of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3127">
                <text>Data and Text</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="38">
            <name>Coverage</name>
            <description>The spatial or temporal topic of the resource, the spatial applicability of the resource, or the jurisdiction under which the resource is relevant</description>
            <elementTextContainer>
              <elementText elementTextId="3128">
                <text>LA1 4YF</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="47">
            <name>Rights</name>
            <description>Information about rights held in and over the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3134">
                <text>Open</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
      <elementSet elementSetId="4">
        <name>LUSTRE</name>
        <description>Adds LUSTRE specific project information</description>
        <elementContainer>
          <element elementId="52">
            <name>Supervisor</name>
            <description>Name of the project supervisor</description>
            <elementTextContainer>
              <elementText elementTextId="3195">
                <text>Dr Megan Rose Readman</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="53">
            <name>Project Level</name>
            <description>Project levels should be entered as UG or MSC</description>
            <elementTextContainer>
              <elementText elementTextId="3196">
                <text>MSc</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="54">
            <name>Topic</name>
            <description>Should contain the sub-category of Psychology the project falls under</description>
            <elementTextContainer>
              <elementText elementTextId="3197">
                <text>Clinical&#13;
&#13;
Cognitive, Perception</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="56">
            <name>Sample Size</name>
            <description/>
            <elementTextContainer>
              <elementText elementTextId="3198">
                <text>38</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="55">
            <name>Statistical Analysis Type</name>
            <description>The type of statistical analysis used in the project</description>
            <elementTextContainer>
              <elementText elementTextId="3199">
                <text>ANOVA</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
    </elementSetContainer>
  </item>
  <item itemId="148" public="1" featured="0">
    <fileContainer>
      <file fileId="146" order="2">
        <src>https://www.johnntowse.com/LUSTRE/files/original/055f608897628d54c7f2a243de72eb63.txt</src>
        <authentication>849ed4bf5f0ebe3ec34bccd7856d6c63</authentication>
      </file>
      <file fileId="147" order="3">
        <src>https://www.johnntowse.com/LUSTRE/files/original/0caf76688d0fd87a937daad8cef0af66.txt</src>
        <authentication>913353fac700af17d02d4381a7540773</authentication>
      </file>
      <file fileId="148" order="4">
        <src>https://www.johnntowse.com/LUSTRE/files/original/3385513f4c4cf01a4bbf9e074f9fcf10.csv</src>
        <authentication>16a611e6b866f8552c70c6cb4c5f698a</authentication>
      </file>
      <file fileId="143" order="5">
        <src>https://www.johnntowse.com/LUSTRE/files/original/8c74bde845d079abadf048bba0316db4.doc</src>
        <authentication>c06cb4848dbba3e5b81d80f0518d47b5</authentication>
      </file>
      <file fileId="149">
        <src>https://www.johnntowse.com/LUSTRE/files/original/0dfdf4ec4a7cc89c6cc485920a130a43.doc</src>
        <authentication>ebc62a1e24e476b869cb3c367f917845</authentication>
      </file>
    </fileContainer>
    <collection collectionId="2">
      <elementSetContainer>
        <elementSet elementSetId="1">
          <name>Dublin Core</name>
          <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
          <elementContainer>
            <element elementId="50">
              <name>Title</name>
              <description>A name given to the resource</description>
              <elementTextContainer>
                <elementText elementTextId="179">
                  <text>Eye tracking </text>
                </elementText>
              </elementTextContainer>
            </element>
            <element elementId="41">
              <name>Description</name>
              <description>An account of the resource</description>
              <elementTextContainer>
                <elementText elementTextId="180">
                  <text>Understanding psychological processes through eye tracking</text>
                </elementText>
              </elementTextContainer>
            </element>
          </elementContainer>
        </elementSet>
      </elementSetContainer>
    </collection>
    <elementSetContainer>
      <elementSet elementSetId="1">
        <name>Dublin Core</name>
        <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
        <elementContainer>
          <element elementId="50">
            <name>Title</name>
            <description>A name given to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3062">
                <text>Lights, Camera, Action: Investigating Advertisement Susceptibility in Films Amongst Individuals with Parkinson’s Disease and Controls. </text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="39">
            <name>Creator</name>
            <description>An entity primarily responsible for making the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3063">
                <text>Elena Ball</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="40">
            <name>Date</name>
            <description>A point or period of time associated with an event in the lifecycle of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3064">
                <text>2022-09-07</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="41">
            <name>Description</name>
            <description>An account of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3065">
                <text>Product placement is the merging of entertainment with advertising, and its presence in our daily lives is increasing. Despite this, there is an inherent lack of consideration of its influence amongst vulnerable populations such as individuals with Parkinson’s disease (PD). Research suggests that individuals with PD have reduced inhibitory control (IC) which may drive impulsive behaviours. A concernment, therefore, is the influence that product placement may have on the purchase behaviour of individuals with PD alongside a possible propensity to partake in risky and impulsive behaviours. Thus, this study aimed to examine whether reduced IC increases the likelihood that an individual with PD will be susceptible to product placement. The study adopted an experimental approach, recruiting 20 healthy younger controls, 20 healthy older controls, and 13 individuals with mild to moderate PD to participate in watching two films containing product placement; one featuring Coca Cola and the other an Audi. A pre and post product placement questionnaire was used to measure change in purchase behaviour before and after exposure to product placement, and an antisaccade eye tracking task and a Stroop task was used to measure IC. An ANOVA indicated that IC was significantly impaired in individuals with PD compared to healthy controls.  Despite this, linear mixed effects modelling suggested that IC may not be a factor that increases the likelihood that an individual will be more susceptible to product placement. Implications of these findings are discussed relative to other clinically vulnerable populations with similar cognitive impairment symptomology, and the consequent need for future research to continue to explore product placement susceptibility amongst vulnerable populations. &#13;
&#13;
</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="49">
            <name>Subject</name>
            <description>The topic of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3066">
                <text>Parkinson’s Disease, Inhibitory Control, Product Placement Susceptibility &#13;
&#13;
</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="48">
            <name>Source</name>
            <description>A related resource from which the described resource is derived</description>
            <elementTextContainer>
              <elementText elementTextId="3067">
                <text>Method&#13;
Participants&#13;
A voluntary sample of 54 participants were recruited, 20 healthy younger controls (YC) (16 females and four males, (Mage= 22.70, SDage= 2.42)), 20 healthy older controls of comparable age to those with Parkinson’s (OC) (females and males, (Mage= 66.85, SDage= 8.53)), and 15 adults with mild-moderate idiopathic PD (females and males, (Mage= 65.00, SDage= 7.84)). As this research area is entirely novel this sample size was modelled on comparable population studies that have explored IC (Meyer et al., 2020; Paz-Alonso et al., 2020).  YC were defined as young adults aged between 18 to 26 years old with no neurological or cognitive conditions (Stroud et al., 2015). OC were defined as adults aged between 50 to 85 years old with no neurological or cognitive conditions (Zhang et al., 2020). The participants with PD had been diagnosed with mild-moderate idiopathic PD, characterised by mild-moderate impairments of motor and cognitive functioning (DeMaagd &amp; Philip, 2015). &#13;
The exclusion criteria for both the healthy controls and individuals with PD were those who had a diagnosis of any additional neurological or cognitive conditions other than PD. Moreover, given that visual impairments may affect the visual experience of product placement, all participants were screened for red-green colour blindness using the Ishihara test. The standardised cut off for normal vision is 15 (Rodriguez-Carmona &amp; Barbur, 2017), therefore, participants who score 14 or less were excluded as this is indicative of the presence of red-green colour blindness. &#13;
All participants had normal or corrected-to-normal vision. The Addenbrooke’s Cognitive Examination-III (ACE) was used to screen for the presence of cognitive impairment (Bruno &amp; Vignaga, 2019). Participants’ data was only included in analysis if participants achieved a score within the normal range (≥ 82 out of 100). Following this exclusion criteria, one PD participant’s data was removed. Research has shown saccadic eye movements to be influenced by cognitive dysfunction (Hutton, 2008; MacAskill et al., 2012), thus cognitive impairments need to be screened for as this study is measuring saccadic eye movements as a measure of IC. Subsequently, following exclusion criteria, 53 participants’ data was included within analysis.  &#13;
PD participants were selected who were at a Hoehn and Yahr Stage three or less (see Table 1 for background characteristics for participants attached in the files below). The Hoehn and Yahr is used to give a summary of the laterality and severity of PD symptomology (Readman et al., 2021b). Five participants presented unilateral symptoms only (stage one), seven participants presented bilateral symptoms with no impairment of balance (stage two) and one participant presented bilateral symptoms with some postural instability but were not physically dependent (stage three). PD symptomology was assessed using the Movement Disorder Society Unified Parkinson’s Disease Rating Scale (MDS-UPDRS) (Evers et al., 2019). All PD participants were tested under their usual medication regimes and were in a typical functioning ‘ON’ phase. Eight participants were taking a dopamine agonist (e.g., Ropinirole), eight participants were taking a combination drug (e.g., Madopar), six participants were taking a monoamine oxidase inhibitor (e.g., Rasagiline), and two participants were taking a Catechol-O-Methyl Transferase (e.g., Entacapone). &#13;
YC were recruited through the researcher’s social network, whereas both OC and individuals with PD were recruited through established research interest databases (OC C4AR database; PD MRR PD interest database (FST2005)).  &#13;
Materials&#13;
Health and Demographic Questionnaire&#13;
	The health and demographic questionnaire (HADQ) was developed and distributed using Qualtrics (Qualtrics, 2022), an online software that aids the process of building, distributing, and analysing surveys (Carpenter et al., 2019). The HADQ was comprised of four distinct subsections pertaining to both the participants general demographics, and more specific health related measures.&#13;
	Demographic Questions. For participant group allocation, participants were asked for their age, sex, and whether they held a diagnosis of PD. Information about participants’ age also afforded the opportunity for exploration into the possible effect of age as well as PD on product placement susceptibility.  &#13;
The Hospital Anxiety and Depression Scale (HADS). The HADS is a 14 item (7 items pertaining to anxiety and 7 items pertaining to depression) self-report assessment of anxiety and depression suitable for both psychiatric and non-psychiatric populations (Stern, 2014). All items are rated on a 4-point severity scale with a total score of 11 or more being indicative of probable anxiety and depression respectively (Caci et al., 2003; Edelstein et al., 2010). Literature has found HADS to be high in construct validity and very good internal consistency was observed when measuring anxiety (Cronbach’s α = .83) and depression (Cronbach’s α = .82) (Bjelland et al., 2002; Johnston et al., 2000; Mondolo et al., 2006). &#13;
	Edinburgh Handedness Inventory. The Edinburgh Handedness Inventory is a 10-item self-report questionnaire in which participants are asked to indicate a preference for which hand they would use when completing a range of daily activities (e.g., brushing teeth) (Robinson, 2013). Through this a handedness score ranging from 100 (strong right) to -100 (strong left) was deduced.  Excellent internal consistency was observed in the 10-item Edinburgh Handedness Inventory (Cronbach’s α = .94) (Fazio et al., 2013). Previous literature suggests that handedness and eye-dominance are correlated because of hemispheric specialisation (McManus, 1999; Willems et al., 2010), therefore establishing participants’ handedness was indicative of their dominant eye when measuring IC through saccadic eye movements. &#13;
PD Diagnosis questions. Participants with PD were asked to provide specifics relating to their diagnosis, including years since diagnosis, years since presumed onset, and what medication, and its dosage, they are prescribed. These items were necessary to investigate whether PD severity and medication type influence product placement susceptibility.&#13;
Screening Assessments&#13;
	Cognitive Impairments. The Addenbrooke’s Cognitive Examination-III (ACE) is a cognitive assessment that screens for the probable presence of cognitive impairments (Noone, 2015). The ACE is comprised of 24 items that analyse attention, memory, fluency, language, and visuospatial processing (Bruno &amp; Vignaga, 2019). Very good internal consistency was observed in the ACE (Cronbach’s α = .88) (Kan et al., 2019) and validity (Matias-Guiu et al., 2017; Takenoshita et al., 2019). &#13;
Visual Impairments. The Ishihara test is a reliable (Birch, 1997) 17 item assessment for red-green colour blindness that requires participants to read aloud a set of numbers on Ishihara plates that are made up of coloured dots (Marey et al., 2015). &#13;
PD Symptomology. MDS-UPDRS is a tool to measure the progression of PD symptomology (Evers et al., 2019). MDS-UPDRS is comprised of a series of tasks that assesses PD symptomology within the last week, in the domains of mentation, behaviour and mood, activities of daily life, motor abilities, and complications of therapy (Holden et al., 2018). Very good internal consistency was observed in the MDS-UPDRS (Cronbach’s α = .90) (Abdolahi et al., 2013) and valid assessment of PD symptomology severity (Goetz et al., 2008; Metman et al., 2004). &#13;
Measures of Inhibitory Control &#13;
	Eye Tracking Tasks. The prosaccade and antisaccade tasks were created using Experiment Builder Software Version 1.10.1630 and the data was extracted and analysed using Data Viewer Software. Eye movements were recorded via the EyeLink Desktop 1000 at 500 Hz. Whilst recording eye movements, participants were asked to place their chin on a chin rest to reduce their head movements. Participants sat approximately 55cm away from the computer monitor (monitor run at 60Hz). &#13;
Firstly, participants were asked to complete the 4-point calibration task to improve eye tracking accuracy (Pi &amp; Shi, 2019). In this task participants were asked to follow a red target around the screen as it moved up, down, left, and right. Next, participants completed the prosaccade eye tracking task. To centralise participants’ gaze, participants were instructed to look at a white fixation target displayed on a computer screen for 1000ms. Participants were then instructed to look towards a red lateralised target that appeared on screen for 1200ms at a 4° visual angle either to the left or to the right of where the white central dot had been located, as quickly and as accurately as possible (Readman et al., 2021a). The eye tracking equipment measured participants’ saccades and latencies (how long it took for participants to fixate on the red target). A total of 16 gap trials were presented with a blank interval screen displayed for 200ms between the extinguishment of the white fixation target and the initial appearance of the red target, which resulted in a temporal gap in stimuli presentation. The prosaccade task was incorporated to ensure that alterations in participants’ antisaccade task performance were not due to impaired prosaccades and rather are indicative of alterations in IC. &#13;
For the antisaccade task, participants were first asked to look at a central white fixation dot for 1000ms to centralise their gaze. Participants were then asked to direct their gaze and attention focus to the opposite side of the screen to where a green lateralised target was presented for 2000ms at a 4° visual angle either to the left or to the right of where the white central dot had been located, as quickly and accurately as possible (Derakshan et al., 2009). See figure 1 above for a visual display of an antisaccade task. The eye tracking equipment measured participants’ saccades, latencies (how long it took participants to fixate their gaze to the opposite direction to the green target), and error rates (how many times participants incorrectly looked at the green target). A total of 16 gap trials were presented with a blank interval screen displayed for 200ms between the extinguishment of the white fixation target and the initial appearance of the green target, which resulted in a temporal gap in stimuli presentation. &#13;
	Stroop Test. The Stroop test was conducted using PsyToolkit’s free online demonstration (PsyToolkit, 2022). Unlike in the original Stroop test whereby participants had to say the ink colour aloud (Stroop, 1935), using PsyToolkit’s online Stroop test allowed for a more accurate measurement of participant’s reaction time (ms) through pressing the key corresponding to the ink colour (Brenner &amp; Smeets, 2018). Participants completed the Stroop test on a HP ProBook 470 G5 17.3” laptop (HP, 2022), and were sat approximately 30cm away from the laptop. Presenting the Stroop test on this laptop enabled participants to view the test on a large screen, thus improving the accessibility of the test. The colour words presented to participants were ‘red’, ‘green’, ‘yellow’, and ‘blue’.&#13;
	Participants were instructed to press the key corresponding to the initial letter of the ink colour of the printed word presented on screen as quickly and accurately as possible. For example, the correct answer for RED would be if the participant pressed the key ‘B’ for blue. A total of 40 gap trials were presented. For each trial, a colour word was presented on screen for 2000ms. The colour word was either congruent (the colour word and the meaning are the same, e.g., GREEN) or incongruent (the colour word and the meaning is different, e.g., GREEN). There was a 100ms gap in presentation of the word in which a white cross was presented on a black interval screen. Participants’ congruent and incongruent reaction times (ms), correct Stroop score (correctly identified ink colour out of 40), and Stroop effect (incongruent reaction time (ms) minus congruent reaction time (ms)) were recorded.&#13;
The ease at which the Stroop test can be conducted in a non-laboratory environment and the simplicity at which the colour words can be translated into other languages, increases its accessibility and universality as a measure of IC (Gass et al., 2013). This assessment would, however, be an invalid measure of IC for individuals affected by colour blindness or dyslexia, limiting the populations the Stroop task can assess (Scarpina &amp; Tagini, 2017). &#13;
Product Placement Film Clips&#13;
The incorporation of film clips containing product placement was guided by the prominent use of film clips within previous research that had investigated product placement susceptibility (Kamleitner &amp; Jyote, 2013; Yang &amp; Roskos-Ewoldsen, 2007). Jurassic World featuring Coca Cola and Avengers Endgame featuring Audi were chosen as they were popular films that contained product placement that both younger and older adults would recognise (Malaj, 2022), minimising the effects of familiarity. Furthermore, these two film clips were chosen as they contained product placement of products of different monetary value products. Thus, controlling for the potential effects of monetary value on product placement susceptibility (McDermott et al., 2006). &#13;
	Both film clips were downloaded from Youtube and trimmed to last approximately one minute each to lessen the study length because of the propensity for individuals with PD to tire because of the symptomology they present with (see Appendix A for the screen shots of the two film clips). The two film clips were shown on a HP ProBook 470 G5 17.3” laptop because the large screen enhanced participants’ visual experience of product placement (HP, 2022).&#13;
Measure of Purchase Intention&#13;
	Separate pre and post product questionnaires for each clip were made using Qualtrics (Qualtrics, 2022). To measure purchase behaviour, participants were asked how strong their preference was to buy those drink/car brands on a Likert scale of one to seven (from one = “Extremely unlikely” to seven = “Extremely likely”). Literature has found 7-point Likert scales to be a more reliable scale because it allows for more accurate and differentiated responses than smaller scales like 5-point Likert scales (Cicchetti et al., 1985; Finstad, 2010). The use of a 7-point Likert scale therefore gained a more sensitive and accurate measurement of product placement susceptibility. Both the pre and post product placement questionnaires asked participants the same questions therefore enabling us to measure if there was a change in participants’ responses prior to and after exposure to product placement (Matthes et al., 2007).&#13;
Design&#13;
	The study used a 3 between (Participant Status: Healthy Young Controls vs. Healthy Older Controls vs. Individuals with Parkinson’s Disease) x 2 within (Product Placement Category: Drink vs. Car) mixed-subjects design.&#13;
Procedure&#13;
As this study recruited a vulnerable population, the information sheet was sent to participants via email 48 hours prior to the in-person study. This afforded participants the time to ask questions or express any concerns about the study before then being sent the consent form 24 hours prior to commencing the in-person study. Once participants had read and completed the digital consent form, participants were sent the digital HADQ. The HADQ took participants approximately 10 minutes.  &#13;
	Prior to the main study, participants were screened for cognitive impairment, using the ACE, and visual impairment, using the Ishihara test. At this time the severity of Parkinson’s symptomology was assessed using the MDS-UPDRS where appropriate.&#13;
	On completion of all pre-study screening, participants were asked to firstly complete a prosaccade eye tracking task and then an antisaccade eye tracking task which took approximately 10 minutes. &#13;
	Participants were then asked to complete a pre product placement questionnaire and then watch a short film clip. After watching the film clip, participants were asked to complete a post product placement questionnaire. Finally, participants were asked to complete the Stroop test which took approximately five minutes to provide a further measure of IC and to act as a buffer in time. &#13;
	This process was repeated for a second product category condition. The order of condition completion was randomly counterbalanced across participants to increase internal validity by minimising the potential for order effects (Corriero, 2017). The in-person study lasted approximately an hour for healthy controls and an hour and 30 minutes for PD. At the end of the study, participants were read and given a copy of the debrief sheet, thanked for their participation and time, and given £10 as a contribution towards travel expenses. All raw data was stored on the Lancaster University OneDrive, on a password-protected computer.&#13;
Data Analysis&#13;
	The raw data from the prosaccade and antisaccade tasks were extracted using the EyeLink DataViewer Software (Version 3.2) and processed using the bespoke software SaccadeMachine (Mardanbegi et al., 2019). Noise in the dataset was removed by filtering out frames with a velocity signal greater than 1,500 deg/s or with an acceleration signal greater than 100,000 deg/s². The EyeLink Parser was used to detect fixations and saccadic events. Saccades were extracted alongside multiple temporal and spatial variables. Trials were excluded in cases when the participant did not direct their gaze to the central fixation target. The onset of target display was a temporal window of 80-700ms, thus anticipatory saccades made prior to 80ms and excessively delayed saccades made after 700ms were removed.&#13;
	To improve data analysis reproducibility, statistical analyses were conducted using RStudio (version 2022.09.0) (Quick, 2010). To prepare the Stroop test data for analysis, participants’ Stroop scores (correctly identified ink colour out of 40), congruent and incongruent trial reaction times (ms), and Stroop effect (incongruent trials reaction time (ms) minus the congruent trials reaction time (ms)) were downloaded from Psytoolkit into an Excel file. IC was operationalised as the Stroop effect (Kane &amp; Engle, 2003). &#13;
	To investigate the susceptibility to product placement, a difference in purchasing behaviour score was calculated for each product. To do so, the pre product placement ratings of the likelihood of purchasing each brand were subtracted from the post product placement ratings of the likelihood of purchasing each brand. A positive difference was indicative of participants being more likely to buy the featured product after exposure to product placement, a negative difference suggested that participants were less likely to buy the featured product, and a difference of zero indicated no change in purchase behaviour. &#13;
	First, to confirm the assumption that IC is impaired in individuals with PD compared to healthy controls, three separate between-factor ANOVAs were performed to compare the main effect of group (YC, OC, and PD) on antisaccade latency, antisaccade error rate, and Stroop effect (See Appendix B for R code). A between-factor ANOVA was chosen because it compares three or more categorical groups to establish whether there is a significant difference on a dependent measure (Henson, 2015). As ANOVA results only identify a difference between groups, post hoc Tukey HSD tests for multiple comparisons were conducted to determine where the differences lie between groups (Abdi &amp; Williams, 2010). &#13;
	To investigate whether IC influences product placement susceptibility, a linear mixed effects modelling (LMM) was fitted. The LMM fitted incorporated difference in purchase behaviour scores (differencescore) as the outcome, and group (PD v Healthy older control v Healthy younger control) and measures of IC (antisaccade latency, antisaccade error rate, and Stroop) as the fixed effects. Given that IC is part of an individual’s executive function (Crawford et al., 2002), ACE score (as a measurement of the participants overall cognitive function; Noone, 2015) was also fitted as a fixed effect. LMM allows for the analysis of fixed effects of independent variables, whilst also considering unexplained differences corresponding to random effects like participant variation (Baayen et al., 2008).  Random effects of both participants and product (Car or Drink) on intercepts were added (See Appendix C for R code). The LMM was fitted using the Satterthwaite adjustment method in lme4 package (Bates et al., 2014) in R (version 2022.09.0) (Quick, 2010). &#13;
Ethics&#13;
	This study received ethical approval from the Psychology Department Research at Lancaster University on the 22/06/2022 and complied with The British Psychological Society’s guidelines (2014).&#13;
&#13;
&#13;
&#13;
</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="45">
            <name>Publisher</name>
            <description>An entity responsible for making the resource available</description>
            <elementTextContainer>
              <elementText elementTextId="3068">
                <text>Lancaster University</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="42">
            <name>Format</name>
            <description>The file format, physical medium, or dimensions of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3069">
                <text>Data/R.csv</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="43">
            <name>Identifier</name>
            <description>An unambiguous reference to the resource within a given context</description>
            <elementTextContainer>
              <elementText elementTextId="3070">
                <text>Ball2022</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="37">
            <name>Contributor</name>
            <description>An entity responsible for making contributions to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3071">
                <text>Elena Ball</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="47">
            <name>Rights</name>
            <description>Information about rights held in and over the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3072">
                <text>Open</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="46">
            <name>Relation</name>
            <description>A related resource</description>
            <elementTextContainer>
              <elementText elementTextId="3073">
                <text>N/A</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="44">
            <name>Language</name>
            <description>A language of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3074">
                <text>English</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="51">
            <name>Type</name>
            <description>The nature or genre of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="3075">
                <text>Data</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="38">
            <name>Coverage</name>
            <description>The spatial or temporal topic of the resource, the spatial applicability of the resource, or the jurisdiction under which the resource is relevant</description>
            <elementTextContainer>
              <elementText elementTextId="3076">
                <text>LA1 4YF</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
      <elementSet elementSetId="4">
        <name>LUSTRE</name>
        <description>Adds LUSTRE specific project information</description>
        <elementContainer>
          <element elementId="52">
            <name>Supervisor</name>
            <description>Name of the project supervisor</description>
            <elementTextContainer>
              <elementText elementTextId="3077">
                <text>Dr Megan Readman</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="53">
            <name>Project Level</name>
            <description>Project levels should be entered as UG or MSC</description>
            <elementTextContainer>
              <elementText elementTextId="3078">
                <text>MSc</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="54">
            <name>Topic</name>
            <description>Should contain the sub-category of Psychology the project falls under</description>
            <elementTextContainer>
              <elementText elementTextId="3079">
                <text>Psychology of Advertising</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="56">
            <name>Sample Size</name>
            <description/>
            <elementTextContainer>
              <elementText elementTextId="3080">
                <text>53 Participants. 20 healthy younger controls, 20 healthy older controls, 13 individuals with mild-moderate Parkinson's disease</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="55">
            <name>Statistical Analysis Type</name>
            <description>The type of statistical analysis used in the project</description>
            <elementTextContainer>
              <elementText elementTextId="3081">
                <text>ANOVA&#13;
Linear Mixed Effects Modelling</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
    </elementSetContainer>
  </item>
  <item itemId="118" public="1" featured="0">
    <collection collectionId="2">
      <elementSetContainer>
        <elementSet elementSetId="1">
          <name>Dublin Core</name>
          <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
          <elementContainer>
            <element elementId="50">
              <name>Title</name>
              <description>A name given to the resource</description>
              <elementTextContainer>
                <elementText elementTextId="179">
                  <text>Eye tracking </text>
                </elementText>
              </elementTextContainer>
            </element>
            <element elementId="41">
              <name>Description</name>
              <description>An account of the resource</description>
              <elementTextContainer>
                <elementText elementTextId="180">
                  <text>Understanding psychological processes though eye tracking</text>
                </elementText>
              </elementTextContainer>
            </element>
          </elementContainer>
        </elementSet>
      </elementSetContainer>
    </collection>
    <itemType itemTypeId="14">
      <name>Dataset</name>
      <description>Data encoded in a defined structure. Examples include lists, tables, and databases. A dataset may be useful for direct machine processing.</description>
    </itemType>
    <elementSetContainer>
      <elementSet elementSetId="1">
        <name>Dublin Core</name>
        <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
        <elementContainer>
          <element elementId="50">
            <name>Title</name>
            <description>A name given to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2560">
                <text>Infants' Awareness of Number: Innate Ability or Perceptual Bias?</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="39">
            <name>Creator</name>
            <description>An entity primarily responsible for making the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2561">
                <text>Jessica Sparks</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="40">
            <name>Date</name>
            <description>A point or period of time associated with an event in the lifecycle of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2562">
                <text>07.09.2021</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="41">
            <name>Description</name>
            <description>An account of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2563">
                <text>In order to identify the origin of our understanding of numerosity and arithmetic abilities, it is essential that such abilities are measured in infants. In Wynn’s (1992) study, a case was made for an innate ability to perform arithmetic operation on small number sets as it was demonstrated that infants would look longer at displays that violated their expectations of number. However, research in the years following this seminal study cast doubt on this interpretation of infants’ behaviour. Other research has suggested that perceptual biases are at play, rather than infants possessing a symbolic understanding of number. To address the contrasting findings in this area of developmental research, this study set out to analyse preexisting data to investigate the factors that influence infants’ abilities to track objects over occlusion and to identify the most appropriate level of interpretation of this ability. The present study recruited a sample of 32 infants across two experiments. Adapting the methodology from Wynn (1992), Experiment 1 measured looking time when an object was revealed to be missing from the display, violating infants’ expectation of presence. Experiment 2 measured looking time when an object was revealed to be in the incorrect position on the stage, violating infants’ expectation of position. It was found that infants’ violation trial had a significant effect on looking time and whether the object missing was the first or last to be placed had a significant effect on looking time in violation of presence conditions.</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="49">
            <name>Subject</name>
            <description>The topic of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2564">
                <text>Addition, subtraction, Number, Object Tracking, object files, Infant perception</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="48">
            <name>Source</name>
            <description>A related resource from which the described resource is derived</description>
            <elementTextContainer>
              <elementText elementTextId="2565">
                <text>Participants:  &#13;
In this study, participants were 32 infants aged 5- to 7-months, (M = 188.38 days, SD = 10.51, range = 175 – 218). Infants were 15 males and 17 females. 16 participants were used in each experiment. In Experiment 1, participants were 7 males and 9 females. In Experiment 2, participants were 8 males and 8 females. Participants in each experiment were matched based on age.  &#13;
Apparatus &amp; Stimuli: &#13;
The experiment took place in a dimly lit test room, with displays presented on a grey stage measuring 64cm wide by 40cm high and 31cm deep. An 8.5cm high black screen located 31.5cm behind the front of the stage was used to occlude the display by being rotated upwards. The display also consisted of a 30cm rotating platform that allowed different configurations of objects to be rotated rapidly. The objects used in this study were two 12.5cm high by 9.5cm wide toy hedgehogs that squeaked when squeezed. These toys were magnetic at the bottom.  &#13;
Procedure:  &#13;
Infants were sat in either a high seat or on a caregiver’s lap, 60cm from the front edge of the stage. In cases where infants were sat on a caregiver’s lap, the caregiver’s eyes were above the stage so as to avoid them seeing the display and possibly influencing the infant’s behaviour. After gaze calibration to ensure the accuracy of eye-tracking measures, the procedure closely followed that of Wynn (1992) and Bremner et al (2017).  &#13;
Three pre-test (baseline) trials were presented initially. These resulted in the correct outcome of the operation as well as the two incorrect outcomes in counterbalanced order. The screen was lowered to reveal either one or two toys, depending on the trial, and the observer recorded where the infant looked on the stage. In terms of the location of the toys in trials, when one was presented, it was placed 7.5cm to the right of the stage’s centre. When two toys were presented, the second toy was placed 7.5cm to the left of the stage’s centre. Pre-test trials continued until the infant accumulated at least 2 seconds of looking time and looked away from the display for 2 seconds or more. When this was achieved, the screen was raised and the same procedure was repeated for the displays for the other two outcomes.  &#13;
Test trials were administered in two blocks of four trials. The experimenter’s hand emerged at one side above the screen. The side at which the toy first appeared was counterbalanced across participants. The toy squeaked to capture the infant’s attention and continued to squeak to maintain this attention as it was placed on one of the locations used during the correct outcome familiarisation trial. The experimenter then slowly withdrew their hand, clasping and unclasping the hand to show the infant that it was empty, and the screen was then raised to occlude the toy from the infant’s view. The time taken from the appearance of the toy to the withdrawal of the hand took approximately 5 seconds. The experimenter’s hand then reappeared above the screen from the opposite side of the display, holding an identical squeaking toy. Once the infant’s attention had been captured, the toy was placed in the other location used during correct outcome familiarisation trials. The hand was then raised and, again, clasped and unclasped to show the infant the hand was empty. The hand was then slowly withdrawn from the display. The screen was then lowered to reveal either the correct or incorrect outcome.  &#13;
In Experiment 1, conditions involved violation of object presence. In ‘added object absent’ trials, the screen was lowered to reveal the last object to be placed was missing from the display. In ‘original object absent’ trials, the screen was lowered to reveal the first object to be placed, present before the screen was raised, was missing from the display. In Experiment 2, conditions involved violation of object position. In ‘added object in wrong location’ trials, the screen was lowered to reveal the last object to be placed appeared in the centre of the stage rather than on the side of the stage in which it was placed. In ‘original object in wrong location’ trials, the screen was lowered to reveal the original object in the display appeared in the centre of the stage rather than on the side it was in before the screen was raised.  &#13;
These test trials continued until the infant had accumulated at least 2 seconds of looking time and looked away from the display for 2 seconds or more. &#13;
&#13;
</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="45">
            <name>Publisher</name>
            <description>An entity responsible for making the resource available</description>
            <elementTextContainer>
              <elementText elementTextId="2566">
                <text>Lancaster University </text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="42">
            <name>Format</name>
            <description>The file format, physical medium, or dimensions of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2567">
                <text>.csv</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="43">
            <name>Identifier</name>
            <description>An unambiguous reference to the resource within a given context</description>
            <elementTextContainer>
              <elementText elementTextId="2568">
                <text>Sparks2021</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="37">
            <name>Contributor</name>
            <description>An entity responsible for making contributions to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2569">
                <text>Julonna Peterson and Rebecca Mitchell</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="47">
            <name>Rights</name>
            <description>Information about rights held in and over the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2570">
                <text>open</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="46">
            <name>Relation</name>
            <description>A related resource</description>
            <elementTextContainer>
              <elementText elementTextId="2571">
                <text>Wynn's 1992 study</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="44">
            <name>Language</name>
            <description>A language of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2572">
                <text>English</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="51">
            <name>Type</name>
            <description>The nature or genre of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="2573">
                <text>Data</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="38">
            <name>Coverage</name>
            <description>The spatial or temporal topic of the resource, the spatial applicability of the resource, or the jurisdiction under which the resource is relevant</description>
            <elementTextContainer>
              <elementText elementTextId="2574">
                <text>Developmental </text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
      <elementSet elementSetId="4">
        <name>LUSTRE</name>
        <description>Adds LUSTRE specific project information</description>
        <elementContainer>
          <element elementId="52">
            <name>Supervisor</name>
            <description>Name of the project supervisor</description>
            <elementTextContainer>
              <elementText elementTextId="2642">
                <text>Gavin Bremner</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="53">
            <name>Project Level</name>
            <description>Project levels should be entered as UG or MSC</description>
            <elementTextContainer>
              <elementText elementTextId="2643">
                <text>MSC</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="54">
            <name>Topic</name>
            <description>Should contain the sub-category of Psychology the project falls under</description>
            <elementTextContainer>
              <elementText elementTextId="2644">
                <text>Developmental</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="56">
            <name>Sample Size</name>
            <description/>
            <elementTextContainer>
              <elementText elementTextId="2645">
                <text>32</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="55">
            <name>Statistical Analysis Type</name>
            <description>The type of statistical analysis used in the project</description>
            <elementTextContainer>
              <elementText elementTextId="2646">
                <text>ANOVA</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
    </elementSetContainer>
    <tagContainer>
      <tag tagId="4">
        <name>infant perception</name>
      </tag>
    </tagContainer>
  </item>
  <item itemId="83" public="1" featured="0">
    <fileContainer>
      <file fileId="41">
        <src>https://www.johnntowse.com/LUSTRE/files/original/70e8b6f0e20b7e3f46e642c7284bd8a8.doc</src>
        <authentication>6d2e0f9e5936d11253c9ab16b9bc1842</authentication>
      </file>
    </fileContainer>
    <collection collectionId="2">
      <elementSetContainer>
        <elementSet elementSetId="1">
          <name>Dublin Core</name>
          <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
          <elementContainer>
            <element elementId="50">
              <name>Title</name>
              <description>A name given to the resource</description>
              <elementTextContainer>
                <elementText elementTextId="179">
                  <text>Eye tracking </text>
                </elementText>
              </elementTextContainer>
            </element>
            <element elementId="41">
              <name>Description</name>
              <description>An account of the resource</description>
              <elementTextContainer>
                <elementText elementTextId="180">
                  <text>Understanding psychological processes through eye tracking</text>
                </elementText>
              </elementTextContainer>
            </element>
          </elementContainer>
        </elementSet>
      </elementSetContainer>
    </collection>
    <elementSetContainer>
      <elementSet elementSetId="1">
        <name>Dublin Core</name>
        <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
        <elementContainer>
          <element elementId="50">
            <name>Title</name>
            <description>A name given to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1913">
                <text>Experiencing social acceptance and rejection through ‘likes’ and ‘dislikes’: Does sleep quality affect the processing of social rewards?</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="39">
            <name>Creator</name>
            <description>An entity primarily responsible for making the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1914">
                <text>Abigail Taylor-Spencer</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="40">
            <name>Date</name>
            <description>A point or period of time associated with an event in the lifecycle of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1915">
                <text>2018</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="41">
            <name>Description</name>
            <description>An account of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1916">
                <text>In adolescence, high importance is placed on peer evaluations and social rewards have increased salience during this developmental period. Sleep patterns also change in adolescence, as teenagers typically experience insufficient sleep. This research measured the pupil dilation of forty-four adolescents aged 16 to 18 using two tasks (audio and visual) to investigate whether sleep duration influenced the way social acceptance and rejection were processed. Sleep duration scores were obtained using the measure of sleep debt; this was calculated by subtracting sleep duration during the week from sleep duration at the weekend, plus weekday bedtime. It was expected that higher sleep debt would be linked to increased pupil reactivity towards social feedback and that there would be a greater pupil dilation in response to social rejection compared to social acceptance. In the visual task, it was found that sleep debt affected males and females differently when processing social rewards, as females with high sleep debt showed increased pupil dilation towards positive feedback compared to negative feedback, whereas males with low sleep debt showed a larger dilation towards positive feedback than females. It was also found that females with lower sleep debt gave more likes than dislikes when rating photos. This implies that sleep duration affects the social feedback adolescents provide. When a male voice was used in the audio task, more pupillary reactivity towards social acceptance was observed, however when a female voice was used, pupils dilated more in response to social rejection. Future research should further investigate these gender differences.</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="49">
            <name>Subject</name>
            <description>The topic of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1917">
                <text>Adolescence&#13;
 Pupil dilation&#13;
Social feedback&#13;
 Reward&#13;
 Rejection&#13;
Sleep debt.</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="48">
            <name>Source</name>
            <description>A related resource from which the described resource is derived</description>
            <elementTextContainer>
              <elementText elementTextId="1918">
                <text>Participants&#13;
	Forty-four participants (N=44) were recruited from Haslingden High School and Sixth Form to participate in this research. The participants (35 female, 9 male) were all between the ages of 16 and 18 (Mage = 16.98, SDage = .63). Students in Psychology, Sociology and English classes were given the opportunity to participate in the research and contacted the researcher via email if they wished to participate. Each participant provided their informed consent before beginning the study.&#13;
Materials&#13;
	Photo ratings. Firstly, the participants were shown a PowerPoint containing 40 photos, which had been previously collected by the researcher, and featured adolescents which the participants did not know. Each photo was displayed individually for four seconds, meaning that the presentation lasted two minutes and forty seconds in total. Participants were provided with a sheet of paper on which they had an option to tick either ‘like’ or ‘dislike’ for each photo on the PowerPoint (see Appendix A). The total number of likes was calculated for each participant.&#13;
	Eye tracker. An eye-tribe desktop eye tracker with a 30Hz sampling rate was used to measure the pupil dilation of the participants in response to stimuli on two tasks - a visual task and an audio task. A chin rest was used to ensure the participants kept their heads still.&#13;
	Visual task. The visual task involved showing the participants the same 40 photos which they had previously been shown in the photo rating task, however, each photo had either a ‘like’ symbol or ‘dislike’ symbol (see Figure 1) in the bottom right hand corner. Participants were informed prior to beginning the task that if a photo contained the ‘like’ symbol, it meant that the individual in the photo had liked the participant’s picture, however the ‘dislike’ symbol meant that the individual in the photo had disliked the participant’s picture. The presentation of photos was randomised across participants&#13;
&#13;
Audio task. The audio task involved the participants listening to forty voice recordings, which each lasted between six and seven seconds in length. Twenty of these recordings were nice comments and twenty were nasty comments, which were found on online social media platforms. An example of a nice comment is; ‘You look unreal and your outfit is amazing. You are a true inspiration to everyone’ and an example of a nasty comment is; ‘You are so fake, and you are such a liar. Every single thing you say is a lie’ (see Appendix B for the complete list of comments). A male voice read out half of the nice and half of the nasty comments, and a female voice featured in the other half of the recordings. The nice comments were characterised as positive social feedback, and the nasty as negative social feedback. The presentation of nice and nasty comments was randomised across participants. The audio material was rated for emotional valence and arousal; the former being how positive or negative the recordings were, and the latter being the intensity of this positivity or negativity (Citron, Gray, Critchley, Weekes, &amp; Ferstl, 2014). See Appendix C for the emotional valence and arousal scores, which were rated by six individuals using Qualtrics. Presentation of the nice and nasty comments was randomised across participants.&#13;
	Questionnaires. Participants were asked to complete two questionnaires; one which was an adaptation of the MCTQ questionnaire (Munich ChronoType Questionnaire; Roenneberg, Wirz-Justice &amp; Merrow, 2003), to identify the sleeping patterns of the participants (see Appendix D), and a questionnaire about their social media use (see Appendix E) which was used to maintain the ruse that the study was interested in the participants’ social media use.&#13;
	This study received ethical approval from Lancaster University on 05/04/2018.&#13;
Design&#13;
	Variables. The dependent variable in this study was pupil size, which was measured in arbitrary units, using an eye tribe eye tracker. An average pupil diameter was calculated for each trial; each participant had 40 average pupil size measurements in the visual task and 40 average pupil diameter measurements in the audio task. The dependent variables of median and area under the curve were used. The independent variables in the study were; feedback valence, sleep debt, gender voice and gender.&#13;
	Feedback valence. The feedback was within subjects, as all forty-four participants experienced both positive and negative feedback in both tasks. In the visual task, all participants saw twenty people who had supposedly ‘liked’ their photo, and twenty people who had supposedly ‘disliked’ their photo. In the auditory task, all participants heard twenty positive comments and twenty negative comments. This was analysed to assess whether varying pupillary responses were elicited towards positive and negative social feedback.&#13;
	Sleep debt. Sleep debt was determined by the MCTQ (Roenneberg et al., 2003); a value of sleep debt was calculated by subtracting sleep duration during the week from sleep duration at the weekend, plus weekday bedtime. Participants were split into two groups; high sleep debt and low sleep debt. Those with a high sleep debt had less weekday sleep and greater weekend sleep, which is a marker of poor sleep quality. This was a between subject factor, as half of the participants were in the high sleep debt group, and half in the low sleep debt group.&#13;
	Voice Gender. In the audio task, half of the audio clips featured a male voice, and half featured a female voice, therefore this was a between subject factor. This was analysed to investigate whether the gender of the voice or pictured individual had an effect on the pupillary responses.&#13;
	Gender. In the visual task, the gender of the participants was investigated as a between subjects factor, as nine of the participants were male, and thirty-five were female.&#13;
	Audio task. The design of the audio task was a factorial design with a between subjects factor of sleep debt (which had two levels – low and high) and a within subjects factor of social feedback valence (two levels: positive and negative) and a second within subjects factor of voice gender (two levels: male and female).&#13;
	Visual task. The design of the visual task was a factorial design with a between subjects factor of sleep debt (which had two levels – low and high) and within subjects factors of social feedback valence (two levels: positive and negative) and participant gender (two levels: male and female).&#13;
Procedure&#13;
	Approximately two weeks prior to the beginning of data collection, students in Psychology, Sociology and English classes at Haslingden Sixth Form were contacted and given the opportunity to participate in this research. Those who were interested in participating, and would provide consent, were asked to send a picture containing only themselves (eg. a Facebook profile picture) to the researcher via email for use in the study. The participants were informed that the photo they sent would be liked or disliked by students at another school, and that during the study, there would be an opportunity to like or dislike photos of the individuals who rated their picture. No other information about the other ‘students’ was provided. The participants were led to believe that the study was investigating whether social media use affects responses to being judged online, and whether the use of social media affects sleep patterns in adolescence.&#13;
	All participants were tested in the same office in Haslingden High School and Sixth Form. Participants were invited into the office and invited to sit down at a desk which featured an eye-tribe eye tracker, 24-inch iMac monitor and keyboard, and a chin rest was placed 50 cm away from the eye tracker. The computer had MatLab 2015 installed. Each participant was provided with an information sheet (see Appendix F), and was given the opportunity to ask any questions, before signing an informed consent form (see Appendix G) if they still wished to participate and consented to partake in the study.&#13;
	Once the consent form had been signed, the photo rating task was explained. This task involved presenting forty photos to the participants using Microsoft PowerPoint. The photos were shown individually; each photo was on an individual slide, and each one was presented for four seconds. The participants were asked to mark whether they ‘liked’ or ‘disliked’ each photo on a sheet of paper (see Appendix A). The presentation was on an automatic timer however, the participants were informed that if a slide moved on too quickly, the left arrow key would take them back to the previous slide, and the timed presentation would continue by pressing the right arrow key. The participants were led to believe that the photographs they were rating were of the individuals who had rated their photos. The eye tracker was not used during this task.&#13;
	Next, the participants were asked to place their head on the chin rest, and the eye tracker was calibrated. Participants were asked to keep their heads as still as possible, and to move their eyes towards the dots as they appeared on the screen. The calibration was accepted when three stars or above was achieved, and the eye tracker was used for both the visual and auditory tasks. The order in which the tasks were completed was counterbalanced, therefore half of the participants completed the visual task first, and half completed the auditory task first. The participants were informed what would happen during each task and were given the opportunity to ask any questions before the tasks began.&#13;
	The participants were told that, in the auditory task, they would hear forty voice clips; twenty nasty and twenty nice. They were asked to look at a black cross that was located in the centre of the screen whilst the voice clips were playing. Ten of the ‘nice’ clips and ten of the ‘nasty’ clips were read aloud by a female, and the remaining were read by a male voice. The nice and nasty comments which featured in the voice clips were found on online social media platforms (see Appendix B for the full list of comments used), however the participants were asked to imagine that the comments had been directed towards themselves. Participants were told that, in the visual task, they would view the photographs which they had previously ‘liked’ or ‘disliked’ in the photo rating task. However, this time, the photos would either have a ‘like’ thumb or a ‘dislike’ thumb in the bottom right hand corner (see Figure 2 and Figure 3 for examples). If a photo had a ‘like’ thumb, it meant that person had supposedly liked the participant’s photo, whereas a ‘dislike’ thumb meant the individual in the photo had disliked the participant’s photo. Half of the participants completed the visual task first, and half of the participants completed the audio task first; the tasks were counterbalanced to determine whether the order in which they were presented influenced the outcome.&#13;
After finishing both the visual and auditory tasks, participants were asked to complete two questionnaires; the MCTQ (Roenneberg et al., 2003) to determine a sleep debt score and a questionnaire on social media use. After completing the questionnaires, participants were informed that their photo had not actually been seen or rated by pupils at another school, and that the ratings which they gave in the photo rating task wouldn’t be seen by the individuals in the photos. Participants were then provided with a debrief sheet (see Appendix H) and given the opportunity to ask any questions they may have had.&#13;
Analysis&#13;
Preliminary data analysis. In order to measure the magnitude of change in pupil dilation and compare across the conditions, each trial pupil size was baseline adjusted by subtraction of the mean pupil size in the 300ms prior to stimulus onset from each sampled value during the further 4 seconds of stimuli presentation. The area under the curve and median were then calculated from the trial level baseline adjusted data to provide the dependent variables in the analysis. These were used as dependent variables to show the magnitude and duration of the effects. The median was used as opposed to the mean because the median is less likely to be skewed by outliers.&#13;
Two multilevel mixed effects general linear mixed models (GLMM) were used to analyse the data for the two tasks with participant included as a random effect with intercept. An AR(1) heterogeneous first order autoregressive structure with homogenous variances was selected because it was expected that the error variance would become less correlated as the trials became further apart. The total number of likes each participant gave on the photo rating task was calculated and a 2 (gender: male vs. female) x 2 (sleep debt: low vs. high) between factor analysis of variance (ANOVA) was carried out.&#13;
&#13;
</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="45">
            <name>Publisher</name>
            <description>An entity responsible for making the resource available</description>
            <elementTextContainer>
              <elementText elementTextId="1919">
                <text>Lancaster University</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="42">
            <name>Format</name>
            <description>The file format, physical medium, or dimensions of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1920">
                <text>data/SPSS.sav</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="43">
            <name>Identifier</name>
            <description>An unambiguous reference to the resource within a given context</description>
            <elementTextContainer>
              <elementText elementTextId="1921">
                <text>Taylor-Spencer2018</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="37">
            <name>Contributor</name>
            <description>An entity responsible for making contributions to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1922">
                <text>Ellie Ball</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="47">
            <name>Rights</name>
            <description>Information about rights held in and over the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1923">
                <text>Open</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="46">
            <name>Relation</name>
            <description>A related resource</description>
            <elementTextContainer>
              <elementText elementTextId="1924">
                <text>None</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="44">
            <name>Language</name>
            <description>A language of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1925">
                <text>English</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="51">
            <name>Type</name>
            <description>The nature or genre of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1926">
                <text>Data</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="38">
            <name>Coverage</name>
            <description>The spatial or temporal topic of the resource, the spatial applicability of the resource, or the jurisdiction under which the resource is relevant</description>
            <elementTextContainer>
              <elementText elementTextId="1927">
                <text>LA1 4YF</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
      <elementSet elementSetId="4">
        <name>LUSTRE</name>
        <description>Adds LUSTRE specific project information</description>
        <elementContainer>
          <element elementId="52">
            <name>Supervisor</name>
            <description>Name of the project supervisor</description>
            <elementTextContainer>
              <elementText elementTextId="1928">
                <text>Judith Lunn</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="53">
            <name>Project Level</name>
            <description>Project levels should be entered as UG or MSC</description>
            <elementTextContainer>
              <elementText elementTextId="1929">
                <text>MSc</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="54">
            <name>Topic</name>
            <description>Should contain the sub-category of Psychology the project falls under</description>
            <elementTextContainer>
              <elementText elementTextId="1930">
                <text>Cognitive Psychology&#13;
Developmental Psychology</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="56">
            <name>Sample Size</name>
            <description/>
            <elementTextContainer>
              <elementText elementTextId="1931">
                <text>44 Participants (9 male and 35 female)</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="55">
            <name>Statistical Analysis Type</name>
            <description>The type of statistical analysis used in the project</description>
            <elementTextContainer>
              <elementText elementTextId="1932">
                <text>ANOVA&#13;
Linear Mixed Effects Modelling</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
    </elementSetContainer>
  </item>
  <item itemId="39" public="1" featured="1">
    <fileContainer>
      <file fileId="15">
        <src>https://www.johnntowse.com/LUSTRE/files/original/bc7f35dd0dab490d0e3ccb68caa0378e.pdf</src>
        <authentication>df88f729d82643005434316300f8b8ed</authentication>
      </file>
      <file fileId="16">
        <src>https://www.johnntowse.com/LUSTRE/files/original/16076f53797d6be30852dcd892ee7822.pdf</src>
        <authentication>3fdaf8db14c7f7c74f387933560d67db</authentication>
      </file>
    </fileContainer>
    <collection collectionId="2">
      <elementSetContainer>
        <elementSet elementSetId="1">
          <name>Dublin Core</name>
          <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
          <elementContainer>
            <element elementId="50">
              <name>Title</name>
              <description>A name given to the resource</description>
              <elementTextContainer>
                <elementText elementTextId="179">
                  <text>Eye tracking </text>
                </elementText>
              </elementTextContainer>
            </element>
            <element elementId="41">
              <name>Description</name>
              <description>An account of the resource</description>
              <elementTextContainer>
                <elementText elementTextId="180">
                  <text>Understanding psychological processes through eye tracking</text>
                </elementText>
              </elementTextContainer>
            </element>
          </elementContainer>
        </elementSet>
      </elementSetContainer>
    </collection>
    <elementSetContainer>
      <elementSet elementSetId="1">
        <name>Dublin Core</name>
        <description>The Dublin Core metadata element set is common to all Omeka records, including items, files, and collections. For more information see, http://dublincore.org/documents/dces/.</description>
        <elementContainer>
          <element elementId="50">
            <name>Title</name>
            <description>A name given to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1224">
                <text>Could eye movements provide a window into early signs of dementia? Investigating the relationship between eye movements and cognitive decline.</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="39">
            <name>Creator</name>
            <description>An entity primarily responsible for making the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1225">
                <text>Jennifer Grayling</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="40">
            <name>Date</name>
            <description>A point or period of time associated with an event in the lifecycle of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1226">
                <text>2017</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="41">
            <name>Description</name>
            <description>An account of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1227">
                <text>Past research has indicated a relationship between eye movement abilities and cognitive decline. Specifically, performances on the anti-saccade task and pro-saccade task have demonstrated a correlation with Alzheimer’s disease (AD) severity, suggesting that impairments in visual ability may be a potential biomarker for dementia. However, little research has investigated whether the same deficiency is present in more ecologically valid tasks. The current study therefore aimed to extend the dementia literature by examining eye movement abilities in individuals with AD when completing a task that required the visual exploration of videos. In order to investigate a disease effect, patients with AD were compared to healthy older controls. To explore age effects, the older controls were additionally compared to healthy younger controls. To attempt to replicate previous findings, all groups first completed both the pro-saccade and anti-saccade task, before subsequently completing a video task designed to require similar abilities as those necessitated by the anti-saccade task. The findings revealed clear qualitative differences between the age effect and the disease effect on the anti-saccade task, suggesting that AD is not purely an accelerated form of ageing. Furthermore, the results supported the contemporary literature, in that, patients with AD made more errors, and less corrected errors, on the anti-saccade task. In turn, these results advocate for the utilisation of the task as a biomarker of AD. However, this impairment did not translate to the videos task. These results suggest that certain conditions may be conducive to normal viewing behaviour in patients with AD, and thus may potentially indicate that the natural exploration of videos is not a reliable biomarker for AD.</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="49">
            <name>Subject</name>
            <description>The topic of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1228">
                <text>saccades&#13;
Alzheimers</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="48">
            <name>Source</name>
            <description>A related resource from which the described resource is derived</description>
            <elementTextContainer>
              <elementText elementTextId="1229">
                <text>For all saccade tasks, a repeated measures design was used, with group as the independent variable (AD, OC, and YC). For the PST there were 2 dependent variables: latency and amplitude (see appendix A for definitions). For the AST there were 5 dependent variables: latency, amplitude, number of errors, corrected errors percentage, and corrected errors start time (see appendix B for definitions). &#13;
For the videos a 3(Group: AD, OC, YC) x 2(Instructions: free view, instructed view) mixed design was used, with group as a between-subjects factor and instructions as a within-subjects factor. For each of the videos, dwell time was calculated as the dependent variable. A total of 25 AD, 17 OC and 37 YC completed the video task. &#13;
Materials &#13;
Eye tracking apparatus. An EyeLink Desktop 1000 eye-tracker was used at 1000Hz to record eye movements. Participants sat approximately 55cm away from the screen with their head on a chin rest to minimise movement. A 3x3 grid of sequentially appearing dots was used to calibrate participant’s dominant eye with the machine. &#13;
Pro-saccade task. The PST provides a measure of involuntary responses to visual stimulus. The methods utilised in this study followed previously established procedures (Crawford et al., 2005). In order to centre the participant’s gaze, the trial commenced displaying a central fixation target for 1000 milliseconds. This target then disappeared for 200 milliseconds, before a peripheral target appeared for 2000 milliseconds either to the left or right of the initial central fixation target (at ± 4°). The direction was randomised in order to avoid predictive saccades. The inter-trial interval then took place for 1200 milliseconds. The next trial began when the central fixation point reappeared. The participant’s task was to look at the peripheral target as quickly and accurately as possible (see appendix C). There were 36 trials in total. &#13;
Anti-saccade task. The AST provides a measure of inhibition. The task format was identical to the PST except here the participants were instructed to look as quickly as possible to the area, equidistant, but in the opposite direction to the peripheral target (mirror image) (see appendix D). There were 24 test trials in total and 4 practice trials. &#13;
Video task. The video task required participants to watch four, 40 seconds long videos. Three of the videos were viewed three times by participants and consisted of clips from past events in history: the coronation of Queen Elizabeth II, Neil Armstrong landing on the moon, and Gordon Brown and his family leaving Downing Street after losing the general election. Prior to each viewing of the video, participants were given different instructions relating to the video. On first viewing participants were instructed to freely watch the video, this was in order to elicit a bottom-up control of eye gaze to highly salient objects. On second viewing participants were asked questions relating to non-salient objects in order to elicit a top-down control of eye gaze. &#13;
The fourth video, an advertisement for Hovis bread, was viewed only once by participants. Participants were asked to follow with their eyes one object for the entirety of the video. Full details of the questions are given in Appendix E. &#13;
Procedure &#13;
The experiment took place in a well-lit room with no distractions that may have garnered attention. Participants firstly received an information sheet that briefly explained the study (see appendix F). All participants then signed a consent form (see appendix G) to formally provide their consent to take part. &#13;
Before each of the saccade tasks, participants were given time to ask any questions they had. Once they confirmed they understood the experiment, the test trials began. All groups completed the eye-tracking tasks in a pre-defined order. The PST was completed first to avoid carry-over effects previously reported by Roberts et al. (1994), followed by the AST, and lastly the videos task. Participants were offered breaks in-between each task to ensure they did not become distracted or bored. It took no longer than 45 minutes to complete all tasks. Once the experiment had finished, participants were handed a debrief sheet explaining the aims of the study (see appendix H). &#13;
Analyses &#13;
To analyse the videos, total dwell times to the question relevant interest areas were compared to total dwell times to these interest areas when the participants viewed the same video freely. For example, to analyse the trial where participants were asked to count the windows when viewing the Gordon Brown video, a number of interest areas were created around the windows (see appendix I). The total dwell times inside these areas were then compared to the total dwell times inside these areas when participants viewed the same video freely.&#13;
</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="45">
            <name>Publisher</name>
            <description>An entity responsible for making the resource available</description>
            <elementTextContainer>
              <elementText elementTextId="1230">
                <text>Lancaster University</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="42">
            <name>Format</name>
            <description>The file format, physical medium, or dimensions of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1231">
                <text>data/SPSS.sav</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="43">
            <name>Identifier</name>
            <description>An unambiguous reference to the resource within a given context</description>
            <elementTextContainer>
              <elementText elementTextId="1232">
                <text>Grayling2017</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="37">
            <name>Contributor</name>
            <description>An entity responsible for making contributions to the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1233">
                <text>John Towse</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="47">
            <name>Rights</name>
            <description>Information about rights held in and over the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1234">
                <text>Open</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="46">
            <name>Relation</name>
            <description>A related resource</description>
            <elementTextContainer>
              <elementText elementTextId="1235">
                <text>This is part of an on-going EPSRC funded MoDEM study approved by the NHS to Dr Crawford</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="44">
            <name>Language</name>
            <description>A language of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1236">
                <text>English</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="51">
            <name>Type</name>
            <description>The nature or genre of the resource</description>
            <elementTextContainer>
              <elementText elementTextId="1237">
                <text>Data</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="38">
            <name>Coverage</name>
            <description>The spatial or temporal topic of the resource, the spatial applicability of the resource, or the jurisdiction under which the resource is relevant</description>
            <elementTextContainer>
              <elementText elementTextId="1238">
                <text>LA1 4YF</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
      <elementSet elementSetId="4">
        <name>LUSTRE</name>
        <description>Adds LUSTRE specific project information</description>
        <elementContainer>
          <element elementId="52">
            <name>Supervisor</name>
            <description>Name of the project supervisor</description>
            <elementTextContainer>
              <elementText elementTextId="1239">
                <text>Trevor Crawford</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="53">
            <name>Project Level</name>
            <description>Project levels should be entered as UG or MSC</description>
            <elementTextContainer>
              <elementText elementTextId="1240">
                <text>MSc</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="54">
            <name>Topic</name>
            <description>Should contain the sub-category of Psychology the project falls under</description>
            <elementTextContainer>
              <elementText elementTextId="1241">
                <text>Cognitive Psychology&#13;
Neuroscience</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="56">
            <name>Sample Size</name>
            <description/>
            <elementTextContainer>
              <elementText elementTextId="1242">
                <text>33 participants diagnosed with AD by the NHS (age range = 59-90 years; Mage = 74.48; SDage = 8.16; females = 14; males = 15) were recruited through NHS trust sites, and 92 healthy OC (age range = 48-83 years; Mage = 67.66; SDage = 8.92; females = 31; males = 13) were recruited through a local church.</text>
              </elementText>
            </elementTextContainer>
          </element>
          <element elementId="55">
            <name>Statistical Analysis Type</name>
            <description>The type of statistical analysis used in the project</description>
            <elementTextContainer>
              <elementText elementTextId="1243">
                <text>ANOVA</text>
              </elementText>
            </elementTextContainer>
          </element>
        </elementContainer>
      </elementSet>
    </elementSetContainer>
  </item>
</itemContainer>
