<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Res Protoc</journal-id><journal-id journal-id-type="publisher-id">ResProt</journal-id><journal-id journal-id-type="index">5</journal-id><journal-title>JMIR Research Protocols</journal-title><abbrev-journal-title>JMIR Res Protoc</abbrev-journal-title><issn pub-type="epub">1929-0748</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v15i1e90588</article-id><article-id pub-id-type="doi">10.2196/90588</article-id><article-categories><subj-group subj-group-type="heading"><subject>Protocol</subject></subj-group></article-categories><title-group><article-title>Evaluating the Methodological Quality of Artificial Intelligence&#x2013;Assisted Systematic Reviews: Protocol for a Mixed Methods Meta-Research Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Jay</surname><given-names>Mohammad</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Morgan</surname><given-names>Mary</given-names></name><degrees>BSc</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Straus</surname><given-names>Sharon Elizabeth</given-names></name><degrees>MD, MSc</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" 
rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wilson</surname><given-names>Emma</given-names></name><degrees>MLIS</degrees><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Sutradhar</surname><given-names>Rinku</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff7">7</xref><xref ref-type="aff" rid="aff8">8</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Yu</surname><given-names>Catherine</given-names></name><degrees>MD, MHSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gotlib Conn</surname><given-names>Lesley</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Dharma</surname><given-names>Christoffer</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Lipscombe</surname><given-names>Lorraine</given-names></name><degrees>MD, MSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff9">9</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Eskander</surname><given-names>Antoine</given-names></name><degrees>MD, ScM</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff10">10</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Medicine, Division of Endocrinology, University of Toronto</institution><addr-line>2075 Bayview 
Avenue</addr-line><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff2"><institution>Institute of Health Policy, Management and Evaluation (IHPME), Dalla Lana School of Public Health, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff3"><institution>Faculty of Medicine, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff4"><institution>Department of Medicine, Division of Geriatric Medicine, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff5"><institution>Knowledge Translation Program, Li Ka Shing Knowledge Institute, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff6"><institution>Library Services, Sunnybrook Health Science Centre</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff7"><institution>Institute for Clinical Evaluative Sciences</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff8"><institution>Division of Biostatistics, Dalla Lana School of Public Health, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff9"><institution>Department of Medicine, Women's College Hospital</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff10"><institution>Department of Otolaryngology&#x2013;Head and Neck Surgery, Sunnybrook Health Sciences Centre, University of Toronto</institution><addr-line>Toronto</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><contrib-group><contrib contrib-type="editor"><name 
name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Ting</surname><given-names>Eon</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Smallheiser</surname><given-names>Neil</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Mohammad Jay, MD, Department of Medicine, Division of Endocrinology, University of Toronto, 2075 Bayview Avenue, Toronto, ON, M4N 3M5, Canada, 1 416-480-6705, 1 416-480-5761; <email>mohammad.jay@mail.utoronto.ca</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>14</day><month>5</month><year>2026</year></pub-date><volume>15</volume><elocation-id>e90588</elocation-id><history><date date-type="received"><day>30</day><month>12</month><year>2025</year></date><date date-type="rev-recd"><day>16</day><month>03</month><year>2026</year></date><date date-type="accepted"><day>17</day><month>03</month><year>2026</year></date></history><copyright-statement>&#x00A9; Mohammad Jay, Mary Morgan, Sharon Elizabeth Straus, Emma Wilson, Rinku Sutradhar, Catherine Yu, Lesley Gotlib Conn, Christoffer Dharma, Lorraine Lipscombe, Antoine Eskander. Originally published in JMIR Research Protocols (<ext-link ext-link-type="uri" xlink:href="https://www.researchprotocols.org">https://www.researchprotocols.org</ext-link>), 14.5.2026. 
</copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Research Protocols, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.researchprotocols.org">https://www.researchprotocols.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.researchprotocols.org/2026/1/e90588"/><abstract><sec><title>Background</title><p>Artificial intelligence (AI), including large language models (LLMs), is increasingly integrated into systematic review (SR) workflows. AI tools may accelerate searching, screening, data extraction, and reporting, but their effects on methodological quality, reporting completeness, transparency, and reproducibility remain uncertain. 
Existing evaluations largely examine isolated tasks, and inconsistent disclosure of AI use limits reproducibility and oversight.</p></sec><sec><title>Objective</title><p>This 4-phase mixed methods meta-research study will (1) compare the methodological quality of AI-assisted versus traditional SRs; (2) refine, finalize, and apply a preliminary AI Transparency and Disclosure Index (AITDI); (3) evaluate reproducibility by comparing outputs across repeated runs of the same AI model, across different AI models, and between AI models and human reviewers at multiple SR stages; and (4) explore knowledge user perspectives on rigor, transparency, and trust in AI-assisted SRs.</p></sec><sec sec-type="methods"><title>Methods</title><p>We will conduct a matched cohort analysis of SRs published from 2023 to 2025 in biomedical journals. Each AI-assisted SR will be matched 1:2 with traditional SRs by publication year, clinical domain, review type, and meta-analysis status. Two independent reviewers will apply A Measurement Tool to Assess Systematic Reviews, version 2 (AMSTAR 2; methodological quality), PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 (reporting completeness), and, when applicable, Risk of Bias in SRs (ROBIS; risk-of-bias rigor). A preliminary AITDI will be refined and then applied to all AI-assisted SRs. Reproducibility will be assessed using SR-derived task sets to compare outputs across repeated runs of the same model, across different models, and between AI and human reviewers at key SR stages. Semistructured interviews with authors, editors, clinicians, policymakers, and patient partners will be analyzed using reflexive thematic analysis.</p></sec><sec sec-type="results"><title>Results</title><p>As of December 2025, the study has been preregistered on the Open Science Framework (OSF; DOI: 10.17605/OSF.IO/Q5JRW), the search strategy has been finalized, and title/abstract screening has begun. 
Data extraction is planned for March-May 2026, followed by AITDI refinement and reproducibility testing from May 2026 to October 2026. Qualitative interviews are anticipated from October 2026 to February 2027, with final analyses by April 2027 and dissemination planned for mid-2027.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>This study will provide one of the first empirical comparisons of methodological quality, transparency, and reproducibility of AI-assisted versus traditional SRs in the LLM era. Findings will inform expectations for responsible AI integration and support refinement of reporting and methodological best practices, including future development of AI-specific reporting and appraisal extensions (eg, PRISMA-LLM [Preferred Reporting Items for Systematic Reviews and Meta-Analyses&#x2013;large language model] and AMSTAR-LLM [A Measurement Tool to Assess Systematic Reviews-large language model]).</p></sec><sec><title>Trial Registration</title><p>OSF Registries 4nq3t; https://osf.io/4nq3t</p></sec><sec sec-type="registered-report"><title>International Registered Report Identifier (IRRID)</title><p>PRR1-10.2196/90588</p></sec></abstract><kwd-group><kwd>systematic review</kwd><kwd>meta-research</kwd><kwd>artificial intelligence</kwd><kwd>large language models</kwd><kwd>AMSTAR-2</kwd><kwd>PRISMA 2020</kwd><kwd>transparency</kwd><kwd>reproducibility</kwd><kwd>evidence synthesis</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Systematic reviews (SRs) and meta-analyses synthesize evidence using transparent, reproducible methods and inform guidelines and policy [<xref ref-type="bibr" rid="ref1">1</xref>]. However, traditional SR workflows are resource-intensive and often outdated by publication. 
Throughout this manuscript, we use the term &#x201C;traditional systematic reviews&#x201D; to refer to human-driven systematic reviews (ie, reviews conducted without reported artificial intelligence [AI] assistance). Interest in AI, particularly large language models (LLMs), reflects the need to accelerate these processes [<xref ref-type="bibr" rid="ref2">2</xref>]. As AI tools are applied to screening, data extraction, and drafting, concerns arise about whether AI-assisted SRs preserve methodological rigor and appropriate human oversight.</p></sec><sec id="s1-2"><title>AI in SRs</title><p>AI tools (eg, machine learning classifiers, natural language processing, and LLMs) are increasingly used for searching, screening prioritization, data extraction, risk-of-bias assessment, and reporting. Platforms such as Abstrackr (Brown University) [<xref ref-type="bibr" rid="ref3">3</xref>], Rayyan (Rayyan Systems Inc) [<xref ref-type="bibr" rid="ref4">4</xref>], DistillerSR (Evidence Partners Inc) [<xref ref-type="bibr" rid="ref5">5</xref>], and RobotReviewer (King&#x2019;s College London) [<xref ref-type="bibr" rid="ref6">6</xref>] integrate automated features to streamline workflows. While AI may improve efficiency, concerns persist regarding transparency, reproducibility, and errors that may influence clinical or policy decisions. Evaluations must therefore consider the quality of completed SRs rather than isolated tasks.</p></sec><sec id="s1-3"><title>Existing Evidence</title><p>Evidence to date is limited and mixed. Prior studies suggest potential efficiency gains with broadly similar A Measurement Tool to Assess Systematic Reviews version 2 (AMSTAR 2) scores in selected settings [<xref ref-type="bibr" rid="ref7">7</xref>] and variable agreement for specific appraisal tasks [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. 
Others caution that errors, inconsistent outputs, and poor disclosure may compromise trust and reproducibility [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Overall, the literature largely evaluates discrete tasks or narrow clinical areas, with limited assessment of the methodological and reporting quality of fully published AI-assisted SRs, AI transparency, cross-run/model reproducibility, or knowledge user expectations.</p></sec><sec id="s1-4"><title>Evidence Gap in the LLM Era</title><p>LLM use in SR workflows has expanded rapidly since 2023, yet reporting of AI involvement is inconsistent. Existing appraisal tools (eg, AMSTAR 2 [<xref ref-type="bibr" rid="ref11">11</xref>] and the Risk of Bias in SRs [ROBIS] [<xref ref-type="bibr" rid="ref12">12</xref>]) and reporting tools (eg, the PRISMA [Preferred Reporting Items for Systematic Reviews and Meta-Analyses] 2020 [<xref ref-type="bibr" rid="ref13">13</xref>]) assess general methodological rigor and reporting completeness (eg, protocol registration, search comprehensiveness, duplicate processes, and risk-of-bias appraisal), but do not capture AI-specific details such as tool identity, model version, prompting, stage of use, or human oversight. 
To address this gap, we developed a preliminary AI Transparency and Disclosure Index (AITDI; <xref ref-type="table" rid="table1">Table 1</xref>).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Preliminary artificial intelligence transparency and disclosure index (AITDI): domains and minimal reporting requirements.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Domain</td><td align="left" valign="bottom">Description</td><td align="left" valign="bottom">Minimal reporting requirement</td></tr></thead><tbody><tr><td align="left" valign="top">Tool identity</td><td align="left" valign="top">Identifies the AI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> system used (eg, screening classifier, LLM<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup>, and extraction tool).</td><td align="left" valign="top">State the specific tool or model used (eg, ChatGPT [OpenAI] and Rayyan [Rayyan Systems Inc] classifier).</td></tr><tr><td align="left" valign="top">Model version or date</td><td align="left" valign="top">AI models update frequently; versioning is essential for reproducibility.</td><td align="left" valign="top">Provide the version number, release date, or retrieval date.</td></tr><tr><td align="left" valign="top">Stage of use</td><td align="left" valign="top">Specifies which systematic review stages involved AI.</td><td align="left" valign="top">List all workflow stages where AI contributed (eg, screening, extraction, appraisal, synthesis, and drafting).</td></tr><tr><td align="left" valign="top">Parameter settings or prompts</td><td align="left" valign="top">Prompts and settings (eg, temperature) influence outputs.</td><td align="left" valign="top">Report prompts, parameters, thresholds, or configurations used.</td></tr><tr><td align="left" valign="top">Human oversight</td><td align="left" valign="top">Describes reviewer verification, correction, and 
judgment.</td><td align="left" valign="top">Specify the oversight structure and degree of human verification.</td></tr><tr><td align="left" valign="top">Data governance and ethics</td><td align="left" valign="top">Notes privacy, confidentiality, and security issues relevant to AI use.</td><td align="left" valign="top">Describe data handling safeguards and ethical considerations.</td></tr><tr><td align="left" valign="top">Recommended (nonscored): AI-related limitations</td><td align="left" valign="top">AI risks such as hallucinations, instability, or model drift affect interpretability but do not distinguish reviews.</td><td align="left" valign="top">Provide a brief statement acknowledging AI-specific limitations (not scored).</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table1fn2"><p><sup>b</sup>LLM: large language model.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s1-5"><title>Purpose and Significance</title><p>This study responds to emerging use of AI in SR workflows by providing a comprehensive evaluation of AI-assisted SRs at the level of completed reviews. By integrating assessment of methodological quality, reporting completeness, transparency, reproducibility, and knowledge-user perspectives, this work moves beyond task-level evaluations to address how AI affects review quality of SRs in practice. 
The findings will inform best practices for responsible AI integration and support future development of AI-specific reporting and appraisal extensions.</p></sec><sec id="s1-6"><title>Objectives</title><p>This 4-phase mixed methods meta-research study aims to (1) compare the methodological quality of AI-assisted versus traditional SRs; (2) refine, validate, and apply a preliminary AITDI to evaluate AI-related reporting; (3) assess the reproducibility of AI-assisted SR processes across repeated runs, different models, and human comparators; and (4) explore knowledge user perspectives on rigor, transparency, oversight, and trust in AI-assisted SRs. <xref ref-type="fig" rid="figure1">Figure 1</xref> provides a schematic overview of the four study aims and how they interrelate within the mixed methods design.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Study design schematic for aims 1&#x2010;4 and planned future tool development. AI: artificial intelligence; AITDI: Artificial Intelligence Transparency and Disclosure Index; AMSTAR 2: A Measurement Tool to Assess Systematic Reviews version 2; LLM: large language model; PRISMA 2020: Preferred Reporting Items for Systematic Reviews and Meta-Analyses 2020; PRISMA-LLM: Preferred Reporting Items for Systematic Reviews and Meta-Analyses&#x2013;large language model; ROBIS: Risk of Bias in Systematic Reviews; SR: systematic review.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e90588_fig01.png"/></fig></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>This is a 4-aim mixed methods meta-research study. Aim 1 is a matched-cohort comparison of AI-assisted and traditional SRs published between 2023 and 2025 (the LLM era [<xref ref-type="bibr" rid="ref14">14</xref>]). 
Aim 2 refines and validates the AITDI, following best practices for reporting guideline development (Moher et al [<xref ref-type="bibr" rid="ref15">15</xref>]). Aim 3 evaluates the reproducibility of AI outputs using SR-derived task sets. Aim 4 uses qualitative interviews to explore knowledge-user perspectives. <xref ref-type="fig" rid="figure2">Figure 2</xref> summarizes the workflow for Aim 1 and integration with Aim 4.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Workflow for aim 1 and integration with aim 4. Overview of the processes for identifying, matching, extracting, and appraising systematic reviews, followed by comparative analyses and integration with qualitative themes. AI: artificial intelligence; AITDI: AI Transparency and Disclosure Index; AMSTAR 2: A Measurement Tool to Assess Systematic Reviews version 2; PRISMA 2020: Preferred Reporting Items for Systematic Reviews and Meta-Analyses 2020; ROBIS: Risk of Bias in Systematic Reviews; SR: systematic review.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e90588_fig02.png"/></fig></sec><sec id="s2-2"><title>Aim 1: Comparative Quality Study</title><sec id="s2-2-1"><title>Eligibility Criteria</title><p>Eligible records will consist of peer-reviewed published articles that self-identify as SRs, with or without meta-analysis, written in English, and indexed in MEDLINE, CINAHL, or the Cochrane Database of Systematic Reviews. SRs must address health-related questions (human health outcomes, public health, health care delivery, or health-system decision-making). AI-assisted SRs must be published between January 1, 2023, and December 31, 2025. To support time-proximal matching (&#x00B1;3 months; see &#x201C;Matching Strategy&#x201D;), traditional (non&#x2013;AI-assisted) comparator SRs may be published between October 1, 2022, and March 31, 2026, provided they meet all other eligibility criteria. 
We will exclude scoping reviews, rapid reviews, evidence maps, narrative reviews, umbrella or overview reviews, as well as protocols, conference abstracts, letters, commentaries, editorials, preprints, and reviews focused exclusively on preclinical or animal research. Reviews lacking full-text availability after reasonable efforts to obtain the article (including contacting the corresponding author when appropriate) or a reproducible search strategy will also be excluded. Title/abstract screening will confirm SR status, health relevance, and publication characteristics. Classification of AI will occur at full-text review based on explicit reporting in the manuscript or supplementary materials. <xref ref-type="table" rid="table2">Table 2</xref> summarizes the population, intervention, comparator, and outcome (PICO) eligibility criteria for included SRs.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Population, intervention, comparator, and outcome (PICO) eligibility criteria for included systematic reviews.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Domain</td><td align="left" valign="bottom">Eligibility criteria</td></tr></thead><tbody><tr><td align="left" valign="top">Population</td><td align="left" valign="top">Systematic reviews focused on human health, clinical medicine, public health, or health care&#x2013;related topics. 
No restrictions on patient age, condition, or setting.</td></tr><tr><td align="left" valign="top">Intervention / Exposure</td><td align="left" valign="top">Explicit use of artificial intelligence (AI)<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>, machine learning, natural language processing, large language models (LLMs)<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>, or automation tools at any stage of the systematic review workflow (eg, searching, screening, data extraction, risk-of-bias assessment, synthesis, or drafting).</td></tr><tr><td align="left" valign="top">Comparator</td><td align="left" valign="top">Traditional (non&#x2013;AI-assisted) systematic reviews matched 1:2 to AI-assisted SRs<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup> by publication year, review type, clinical domain, and presence/absence of a meta-analysis.</td></tr><tr><td align="left" valign="top">Outcomes</td><td align="left" valign="top">Methodological quality (AMSTAR 2)<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup>, reporting completeness (PRISMA 2020)<sup><xref ref-type="table-fn" rid="table2fn5">e</xref></sup>, risk-of-bias rigor (ROBIS)<sup><xref ref-type="table-fn" rid="table2fn6">f</xref></sup>, AI transparency (AITDI<sup><xref ref-type="table-fn" rid="table2fn7">g</xref></sup>; AI-assisted SRs only), and secondary outcomes including timeliness to publication, dissemination metrics, and workload indicators (when available).</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table2fn2"><p><sup>b</sup>LLM: large language model.</p></fn><fn id="table2fn3"><p><sup>c</sup>SR: systematic review.</p></fn><fn id="table2fn4"><p><sup>d</sup>AMSTAR 2: A Measurement Tool to Assess Systematic Reviews version 2.</p></fn><fn id="table2fn5"><p><sup>e</sup>PRISMA 2020: Preferred Reporting Items for Systematic Reviews and Meta-Analyses 2020.</p></fn><fn 
id="table2fn6"><p><sup>f</sup>ROBIS: Risk of Bias in Systematic Reviews.</p></fn><fn id="table2fn7"><p><sup>g</sup>AITDI: AI Transparency and Disclosure Index.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s2-2-2"><title>Exposure and Comparator Definitions</title><p>The exposure is author-reported AI assistance, operationalized as explicit documentation of AI, machine learning, natural language processing, or LLMs at any stage of the SR workflow, including searching, screening or prioritization, data extraction, risk-of-bias assessment, evidence synthesis (qualitative/narrative or quantitative/meta-analytic), or manuscript drafting. Evidence of AI use may appear in the main text, supplementary materials, acknowledgments, or dedicated disclosure statements. The comparator will include SRs meeting all other eligibility criteria with no reported AI use in the available published materials. Comparator publication dates will align with the parameters specified in &#x201C;Eligibility Criteria&#x201D; to permit time-proximal matching. Conventional software without embedded AI functionality (eg, Covidence [Veritas Health Innovation Ltd], RevMan [Review Manager], and Excel [Microsoft Corp]) will be classified as non&#x2013;AI-assisted.</p><p>AI involvement will be classified by stage and depth of integration rather than treated as a binary. Core methodological uses will include AI contributing directly to evidence identification, selection, appraisal, or synthesis (eg, searching, screening, data extraction, risk-of-bias assessment, or synthesis), whereas supporting use will include drafting or language editing only. Because exposure classification depends on explicit reporting, some comparator reviews may have involved unreported AI use. If such misclassification is nondifferential with respect to methodological quality, it would be expected to attenuate observed between-group differences toward the null. 
Accordingly, Aim 1 should be interpreted as primarily comparing reviews with reported AI use versus reviews with no reported AI use.</p></sec></sec><sec id="s2-3"><title>Matching Strategy</title><sec id="s2-3-1"><title>Rationale for Matching</title><p>A matched cohort design minimizes temporal and topic-related confounding that could independently influence review quality. Matching ensures that AI-assisted and traditional SRs are compared within the same publication period and clinical context, reducing bias from secular trends in SR methodology, evolving journal standards, or variation across clinical domains.</p></sec><sec id="s2-3-2"><title>Matching Factors</title><p>Each AI-assisted SR will be matched to two traditional SRs based on four prespecified characteristics: (1) publication year (within &#x00B1;3 mo) to control for rapidly evolving norms in the LLM era; (2) clinical domain, operationalized using major MeSH (Medical Subject Headings) terms or journal specialty classification; (3) review type (standard or living review); and (4) meta-analysis status (presence vs absence of quantitative synthesis). When multiple eligible comparators exist, two will be selected using a reproducible randomization procedure.</p></sec><sec id="s2-3-3"><title>Why a 1:2 Ratio</title><p>A 1:2 matching ratio increases statistical power and precision relative to 1:1 matching while maintaining feasibility given the expected smaller pool of AI-assisted SRs. 
This ratio balances efficiency with practicality and follows best practices in meta-epidemiologic design [<xref ref-type="bibr" rid="ref16">16</xref>].</p></sec></sec><sec id="s2-4"><title>Outcomes</title><sec id="s2-4-1"><title>Quality Assessment Instruments</title><p>Methodological quality will be assessed using AMSTAR 2, a validated 16-item appraisal tool covering key domains including protocol registration, comprehensive searching, duplicate processes, risk-of-bias assessment, and appropriateness of synthesis methods [<xref ref-type="bibr" rid="ref17">17</xref>]. Reporting completeness will be evaluated using the 27-item PRISMA 2020 checklist [<xref ref-type="bibr" rid="ref13">13</xref>] and risk of bias in review methods using ROBIS [<xref ref-type="bibr" rid="ref12">12</xref>], which assesses eligibility criteria, study selection, data collection and appraisal, and synthesis and interpretation. Transparency of AI use will be assessed using the AITDI, developed for this study, which includes 6 domains, namely tool identity, stage of use, model version, prompting or configuration details, human verification, and data-privacy or ethical statements, along with a recommended nonscored domain addressing AI-related limitations (eg, hallucinations or model drift). The development of the AITDI as a preliminary framework is described under Aim 2, and the index domains are summarized in <xref ref-type="table" rid="table1">Table 1</xref>.</p></sec><sec id="s2-4-2"><title>The Primary Outcome</title><p>The primary outcome will be methodological quality, measured using the AMSTAR 2 tool and expressed as a modified 0&#x2010;13 item-level adherence score. In this meta-research study, this operational summary measure was selected because the primary estimand is the average difference in methodological adherence between groups of systematic reviews, rather than the appraisal of individual reviews for decision-making purposes. 
A continuous item-level score allows estimation of absolute group-level differences and supports a prespecified noninferiority framework. Relative to coarse ordinal confidence categories, a continuous item-level adherence measure preserves more information and allows more sensitive detection of average between-group differences in methodological adherence. We recognize that AMSTAR 2 was originally designed to support appraisal through critical domains and overall confidence ratings. However, those overall confidence categories are intentionally coarse and nonlinear and are designed to reflect critical weaknesses in individual reviews rather than average between-group differences in methodological adherence in a matched comparative meta-research design. Accordingly, we will use a modified item-level adherence score as the primary analytic outcome while interpreting results in conjunction with AMSTAR 2 overall confidence ratings and prespecified critical domains. Although AMSTAR 2 was not developed as a formal additive scale, similar item-level summary approaches have been used in prior meta-research to facilitate group-level comparisons [<xref ref-type="bibr" rid="ref7">7</xref>] and should be interpreted cautiously alongside standard AMSTAR 2 confidence ratings. Consistent with the structure of the included review sample, items that are conditional on quantitative synthesis (AMSTAR 2 items 11, 12, and 15) will be excluded from the summary measure, yielding a maximum possible score of 13. These items will still be extracted for reviews that include meta-analysis and will be summarized descriptively and in subgroup analyses restricted to meta-analytic reviews. This modified score is intended as a group-level comparative metric rather than a replacement for standard AMSTAR 2 interpretation at the level of individual reviews. 
Critical AMSTAR 2 domains (eg, protocol registration, duplicate processes, and comprehensiveness of the search) will also be summarized descriptively.</p></sec><sec id="s2-4-3"><title>Secondary Outcomes</title><p>This will include overall reporting quality (percentage adherence to PRISMA 2020), risk of bias rigor (domain-level and overall ROBIS judgments, when applicable), and transparency of AI use measured using the preliminary AITDI score (an additive item-level score reflecting completeness of AI-use disclosure across 6 domains; applied provisionally in Aim 1, with full refinement and validation in Aim 2). Exploratory outcomes will include timeliness, defined as the interval between protocol registration (or earliest submission) and publication, and dissemination metrics such as 12-month citation counts and Altmetric Attention Scores. Exploratory analyses will also examine whether characteristics such as review type (standard or living), inclusion of meta-analysis, journal tier, team size, and clinical domain are associated with higher methodological or reporting quality. <xref ref-type="fig" rid="figure3">Figure 3</xref> presents the exposure-outcome schema for the comparative analyses. <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> provides a detailed summary of all primary and secondary outcomes and their corresponding quality assessment tools.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Exposure-outcome schema for Aim 1. The exposure is artificial intelligence (AI)&#x2013;assisted systematic reviews compared to traditional systematic reviews. The primary outcome is methodological quality (AMSTAR 2). Secondary outcomes include reporting quality (PRISMA 2020), risk-of-bias rigor (ROBIS), AI transparency (AITDI), timeliness to publication, and dissemination metrics. 
AI: artificial intelligence; AITDI: AI Transparency and Disclosure Index; AMSTAR 2: A Measurement Tool to Assess Systematic Reviews version 2; PRISMA: Preferred Reporting Items for Systematic Reviews and Meta-Analyses; ROBIS: Risk of Bias in Systematic Reviews; SR: systematic review.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e90588_fig03.png"/></fig></sec></sec><sec id="s2-5"><title>Information Sources and Search Strategy</title><p>An information specialist will search MEDLINE (Ovid), CINAHL (EBSCO), and the Cochrane Database of Systematic Reviews to identify eligible SRs published between January 1, 2023, and December 31, 2025. Two coordinated searches will be conducted: (1) a search combining validated SR filters with AI-related terminology to identify SRs explicitly reporting AI assistance; and (2) a broader search applying only the SR filter to identify the comparator sampling frame.</p><p>Validated SR filters from Canada&#x2019;s Drug Agency Search Filters Database will be used and adapted as needed for each database, including the SR/meta-analysis/health technology assessment/indirect treatment comparison filter for MEDLINE and the corresponding filter for CINAHL [<xref ref-type="bibr" rid="ref18">18</xref>]. The Cochrane Database of Systematic Reviews will be searched without an SR filter, as it exclusively indexes review articles. Search strategies will be drafted, piloted, and peer reviewed using the Peer Review of Electronic Search Strategies (PRESS) [<xref ref-type="bibr" rid="ref19">19</xref>] guideline. All searches will be rerun prior to final analyses to ensure retrieval of newly indexed records. 
Database-specific search strategies for the AI-assisted SR search are provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p></sec><sec id="s2-6"><title>Sampling, Matching, and Study Selection</title><p>Study selection will proceed in three sequential stages: (1) identification and eligibility assessment of AI-assisted SRs, (2) construction of the traditional SR comparator sampling frame, and (3) matching and final inclusion.</p><sec id="s2-6-1"><title>Stage 1: Identification and Eligibility Assessment of AI-Assisted SRs</title><p>All records retrieved from the AI-targeted search will undergo a 2-stage screening process consisting of title/abstract screening followed by full-text assessment, conducted independently by 2 reviewers. Prior to each screening stage, reviewers will complete a calibration exercise on a sample of 10 records to ensure consistent interpretation of eligibility criteria; calibration will be considered acceptable at &#x2265;80% agreement, with decision rules refined and retested if needed.</p><p>During title and abstract screening, reviewers will assess records only for article type (SR), relevance to human health, and publication characteristics (English language; 2023&#x2010;2025). No assessment or inference regarding AI use will be made at this stage. All records deemed potentially eligible based on title and abstract will undergo full-text review. During full-text assessment, reviewers will apply all inclusion and exclusion criteria and confirm explicit AI use at any stage of the SR process (eg, searching, screening, data extraction, risk-of-bias assessment, synthesis, or drafting), based on the main text, supplementary materials, acknowledgements, or disclosure statements. Discrepancies at either stage will be resolved through discussion, with unresolved cases adjudicated by a third reviewer. 
Ambiguous cases regarding AI involvement will prompt a single clarification email to the corresponding author. All records meeting eligibility criteria and demonstrating explicit AI involvement will constitute the AI-assisted exposure cohort.</p></sec><sec id="s2-6-2"><title>Stage 2: Construction of the Comparator Candidate Pool (Unscreened)</title><p>The general SR search will provide a broad comparator sampling frame from which candidate matches will be drawn.</p></sec><sec id="s2-6-3"><title>Stage 3: Matching and Final Inclusion</title><p>For each eligible AI-assisted SR, potential comparators will be identified from the traditional SR sample using the matching criteria described in &#x201C;Matching Strategy.&#x201D; When more than 2 potential matches are available, selection will occur using a randomized, reproducible procedure. Subsequently, 2 reviewers will screen full texts to confirm eligibility criteria are met. Comparator SRs failing eligibility will be replaced with the next randomly selected candidate. Sampling will proceed consecutively by earliest publication date within each quarter until at least 150 AI-assisted SRs and 300 matched traditional SRs have been included.</p></sec></sec><sec id="s2-7"><title>Data Extraction</title><p>Two reviewers will independently extract all data using a piloted and standardized extraction form. Prior to formal extraction, reviewers will complete a calibration exercise on a sample of 5&#x2010;10 SRs to ensure consistent interpretation of extraction items and coding rules. Discrepancies during extraction will be resolved through discussion, with unresolved differences adjudicated by a third reviewer. 
Extracted data will include bibliographic characteristics; publication features; methodological elements (eg, protocol registration, number and type of included studies, and use of risk-of-bias tools); structural indicators of review oversight (eg, Cochrane affiliation, funding source, and team size); author-reported involvement of an information specialist when explicitly stated; and indicators of timeliness and dissemination. For AI-assisted SRs, additional fields will capture the AI tool name; model type (eg, large language model&#x2013;based systems, screening classifiers, and others); stage of integration; prompting or configuration details when available; and any description of human oversight, validation, or verification procedures. When available, prompting or configuration details and descriptions of human oversight will be extracted to support classification of the depth of AI integration (core methodological vs supporting use) and to characterize transparency of AI reporting. Extraction items, operational definitions, and coding guidance are provided in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p></sec><sec id="s2-8"><title>Appraisal of Methodological Rigor and Reporting Completeness</title><p>Methodological and reporting quality will be appraised using AMSTAR 2 and PRISMA 2020, with ROBIS applied when relevant to assess risk of bias in the review process. No study-level risk-of-bias tools (eg, ROB 2 [Risk of Bias 2; Cochrane] and ROBINS-I [Risk of Bias in Nonrandomized Studies of Interventions; Cochrane]) will be used, as the unit of analysis is the SR. All quality assessments will be conducted in duplicate, with discrepancies resolved by consensus or third-reviewer adjudication. AMSTAR 2 and PRISMA 2020 emphasize transparency, completeness, and appropriateness of methods; therefore, high scores may still coexist with inaccuracies in extracted results. 
These considerations will be incorporated into the interpretation of findings.</p></sec><sec id="s2-9"><title>Statistical Analysis</title><sec id="s2-9-1"><title>Descriptive Analyses</title><p>Study characteristics will be summarized by exposure group (AI-assisted vs traditional SRs). Continuous variables will be reported as means with SDs or medians with IQRs, and categorical variables as frequencies and percentages. Distributions of AMSTAR 2, PRISMA 2020, ROBIS, and AITDI scores will be visualized using boxplots and density plots.</p></sec><sec id="s2-9-2"><title>Primary Analysis</title><p>The primary objective is to evaluate whether the methodological quality of AI-assisted SRs is noninferior to that of traditional SRs. A linear regression model will estimate the mean difference in AMSTAR 2 total score (AI minus comparator) using a noninferiority margin of &#x2212;1.0, with cluster-robust standard errors to account for matched sets. Models will adjust for prespecified confounders conceptually linked to SR quality and AI adoption, including journal impact tier, funding source (industry vs nonindustry), Cochrane affiliation, team size, and review question type (eg, intervention, diagnostic, and prognostic). The modified AMSTAR 2 total score will be treated as an approximately continuous outcome. Score distributions will be examined using histograms and density plots to assess skewness and floor or ceiling effects. Linear regression was selected because the primary inferential goal is estimation of an absolute difference in average methodological adherence between groups on a directly interpretable scale.</p><p>The noninferiority margin of &#x2212;1.0 was selected a priori to represent the largest acceptable decrement corresponding approximately to one AMSTAR 2 item-level adherence difference on the 13-point modified scale. Differences greater than this threshold would be considered potentially meaningful from a methodological perspective. 
Because additive scoring does not fully reflect the intended critical-domain structure of AMSTAR 2, results from the primary analysis will be interpreted alongside complementary analyses of AMSTAR 2 overall confidence ratings and prespecified critical domains.</p><p>We acknowledge that key drivers of SR quality, such as author team expertise, information specialist involvement, and journal-level enforcement of PRISMA standards, may not be consistently or objectively ascertainable from published reports, and that residual confounding may persist. To mitigate this, our models adjust for established and directly extractable proxies of review rigor, including journal impact tier, Cochrane affiliation, funding source, team size, and review question type. Information specialist involvement will be summarized descriptively and, where reporting is sufficiently complete, it will be incorporated into exploratory adjusted models. Matching variables (review type, meta-analysis status, clinical domain, and publication year) will be added only if residual imbalance persists (standardized mean difference &#x003E;0.10).</p></sec><sec id="s2-9-3"><title>Secondary Analyses</title><p>The following secondary analyses are planned:</p><p>(1) AMSTAR-2 domain-based analyses: to complement the modified item-level adherence analysis and better reflect the intended use of AMSTAR 2, overall confidence ratings (high, moderate, low, or critically low) will be summarized by group and compared using ordinal logistic regression with cluster-robust variance estimators or multinomial models if the proportional odds assumption is violated. 
Prespecified critical AMSTAR 2 domains will also be summarized descriptively and compared between groups using binary regression models where appropriate.</p><p>(2) Reporting quality and risk-of-bias rigor: this will be evaluated using linear regression for PRISMA 2020 adherence and ordinal or multinomial logistic regression for ROBIS judgments, with all models applying cluster-robust variance estimators and adjusting for the same core covariates as in the primary analysis.</p><p>(3) AI transparency (AITDI): for AI-assisted SRs, AITDI scores will be summarized descriptively (eg, means, medians, and distribution plots) to characterize the transparency and disclosure practices. Exploratory, hypothesis-generating analyses will examine whether AITDI scores vary by journal impact tier, funding source, review-group affiliation (eg, Cochrane), review type, and depth of AI integration, using univariable and multivariable regression models as appropriate and restricted to AI-assisted reviews. In Aim 1, AITDI will be treated as a preliminary instrument and used primarily as an explanatory variable in exploratory models of SR quality. Formal refinement, reliability testing, and validation of AITDI will be conducted in Aim 2 and reported separately.</p><p>(4) Timeliness and dissemination will be assessed using linear regression or accelerated failure-time models for timeliness (days from protocol registration or earliest submission to publication) and negative binomial or linear regression for bibliometric impact (12-month citation counts and Altmetric Attention Scores). 
Because these outcomes are influenced by different mechanisms than methodological quality, models will adjust for outcome-specific covariates such as journal tier, publication year, and topic area rather than the core methodological confounders used in earlier analyses.</p><p>(5) Predictors of SR quality: exploratory multivariable models will assess characteristics associated with higher AMSTAR 2 and PRISMA 2020 scores, including review type, review question type, team size, journal tier, and clinical domain. These analyses will be hypothesis-generating. Among AI-assisted SRs, exploratory models will also examine whether higher AITDI transparency scores are associated with higher AMSTAR 2 and PRISMA 2020 scores, recognizing that AITDI is a provisional instrument at this stage.</p><p>(6) Depth of AI integration: among AI-assisted SRs, reviews using AI for core methodological tasks (screening, extraction, or risk-of-bias assessment) will be compared with those using AI only for supporting or reporting functions. 
Mean differences in AMSTAR 2 and PRISMA 2020 scores will be estimated using adjusted regression models with cluster-robust variance.</p></sec><sec id="s2-9-4"><title>Sensitivity Analyses</title><p>Several robustness checks will be conducted: (1) matched-set fixed-effects models to control for within-pair confounding; (2) propensity-weighted analyses using overlap weights on journal tier, funding source, and team size; (3) reclassification analyses restricting to reviews with clearly documented AI use in core methodological stages; (4) leave-one-journal-out analyses to assess the influence of high-volume outlets; (5) analyses restricted to standard (nonliving) systematic reviews to assess whether inclusion of living reviews influences the estimated association between AI assistance and methodological quality; (6) alternative specifications of methodological quality, including proportional odds ordinal logistic regression using AMSTAR 2 overall confidence categories and analyses restricted to prespecified critical domains only; and (7) to examine the potential impact of exposure misclassification due to undisclosed AI use, we will perform a sensitivity analysis restricted to comparator SRs with an explicit statement of no AI use.</p></sec><sec id="s2-9-5"><title>Missing Data</title><p>Missing methodological or bibliometric variables will first be recovered from supplementary files or registered protocols; if unresolved, authors will be contacted once. Remaining missing data will be handled via multiple imputation by chained equations, with complete-case analysis as a sensitivity check.</p></sec></sec><sec id="s2-10"><title>Sample Size and Power</title><p>This study is powered to assess noninferiority in methodological quality between AI-assisted and traditional SRs using the modified AMSTAR 2 adherence scores. 
Prior methodological evaluations comparing AI-assisted and non-AI reviews report AMSTAR 2 totals typically in the 6&#x2010;8 range with no significant differences between groups, supporting our assumption of a true mean difference close to zero and an SD of approximately 2.5&#x2010;3.0 [<xref ref-type="bibr" rid="ref7">7</xref>]. We define a noninferiority margin of &#x2212;1 point (AI minus comparator). Using a one-sided <italic>&#x03B1;</italic>=.025 and a planned sample of 150 AI-assisted reviews and 300 matched comparators (1:2 allocation), the study has approximately 98% power to demonstrate noninferiority under these assumptions. All power calculations were performed in R (R Foundation for Statistical Computing).</p><p>The number of eligible AI-assisted SRs is not known a priori. We will include all eligible AI-assisted reviews identified within the prespecified sampling frame, up to a target of 150. If fewer than 150 AI-assisted reviews are identified, all eligible reviews will be included, and the achieved precision and power will be reported. If yield is insufficient, a prespecified, stepwise expansion of the search (eg, extending the publication window and/or information sources using identical eligibility criteria and exposure definitions) will be undertaken and fully documented. If more than 150 eligible AI-assisted reviews are identified, inclusion will be capped at 150 unless additional inclusion is required to maintain matching balance or improve precision, in which case this will be specified a priori. 
The relative use of LLMs versus other AI tools (eg, screening classifiers) will be determined empirically during data extraction.</p></sec><sec id="s2-11"><title>Aim 2: Development, Refinement, and Evaluation of the AITDI</title><sec id="s2-11-1"><title>Rationale and Work to Date</title><p>AI use in SRs is often poorly disclosed, with important details (tool identity, model version, stage of use, prompts, or oversight) frequently omitted, limiting reproducibility and bias appraisal [<xref ref-type="bibr" rid="ref20">20</xref>]. We developed a preliminary version of the AITDI (<xref ref-type="table" rid="table1">Table 1</xref>) as a minimal transparency index designed to support meta-research (not a reporting guideline). Aim 2 will refine and validate the AITDI using Moher et al&#x2019;s [<xref ref-type="bibr" rid="ref15">15</xref>] reporting guideline development guidance.</p></sec><sec id="s2-11-2"><title>Phase 1: Preparatory Evidence Mapping and Initial Item Generation</title><p>We have started preparatory, concept-driven evidence mapping to identify common transparency gaps in published AI-assisted systematic reviews and to situate these gaps within the context of existing reporting expectations. 
This work involved targeted review of EQUATOR (Enhancing the Quality and Transparency Of health Research)-listed reporting guidelines [<xref ref-type="bibr" rid="ref21">21</xref>] and extensions relevant to AI and SR (PRISMA-S [PRISMA&#x2013;Search extension] [<xref ref-type="bibr" rid="ref22">22</xref>], CONSORT-AI [Consolidated Standards of Reporting Trials&#x2013;Artificial Intelligence extension], and SPIRIT-AI [Standard Protocol Items: Recommendations for Interventional Trials&#x2013;Artificial Intelligence extension] [<xref ref-type="bibr" rid="ref23">23</xref>]), alongside review of policy-level guidance (eg, International Committee of Medical Journal Editors AI updates [<xref ref-type="bibr" rid="ref24">24</xref>]) and recent scholarly proposals addressing AI transparency in medical research and systematic reviews (Generative Artificial Intelligence tools in Medical Research [<xref ref-type="bibr" rid="ref25">25</xref>], PRISMA-trAIce [PRISMA&#x2013;Transparent Reporting of Artificial Intelligence in Comprehensive Evidence Synthesis] [<xref ref-type="bibr" rid="ref26">26</xref>]). This phase will conclude with the consolidation of all candidate items into an initial checklist that will undergo structured evaluation in Phase 2. <xref ref-type="fig" rid="figure4">Figure 4</xref> outlines the multistep development process for the AITDI, including preparatory evidence mapping, Delphi surveys, consensus activities, pilot testing, and finalization.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Development process for the AI Transparency and Disclosure Index (AITDI). Steps include scoping, Delphi rounds, a consensus meeting, pilot testing, and finalization. 
AI: artificial intelligence; AITDI: AI Transparency and Disclosure Index; GAMER: Guidelines for AI in Medical Evidence Reporting; ICMJE: International Committee of Medical Journal Editors; PRISMA-S: Preferred Reporting Items for Systematic Reviews and Meta-Analyses&#x2013;Search; SR: systematic review.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e90588_fig04.png"/></fig></sec><sec id="s2-11-3"><title>Phase 2: Premeeting Delphi Exercise and Item Reduction</title><p>A 2&#x2010;3 round online Delphi survey [<xref ref-type="bibr" rid="ref27">27</xref>] will be conducted to refine and prioritize candidate AITDI items. Participants will be purposively recruited to form a multidisciplinary expert panel, including SR authors and methodologists (ie, researchers who conduct and publish SRs and are intended end users of the tool), AI/ML experts, information specialists, guideline developers, journal editors, clinicians, and patient partners. Eligible panelists will have demonstrated experience in conducting or publishing systematic reviews, AI-assisted research, reporting guideline development, editorial or peer-review activities, or use of systematic reviews in practice. We anticipate recruiting approximately 20&#x2010;40 panelists (consistent with best practices for Delphi studies and reporting guideline development). Panelists will rate each candidate item for importance and clarity using a structured Likert scale and may suggest item refinement, consolidation, or removal. After each round, anonymized summary statistics (including median ratings and rating distributions) and synthesized qualitative feedback will be shared with participants. 
A priori-defined consensus criteria will be used to guide item retention, modification, or removal across rounds, and results from the Delphi process will be used to prioritize items for discussion at the subsequent consensus meeting.</p></sec><sec id="s2-11-4"><title>Phase 3: Consensus Meeting</title><p>The consensus meeting will convene a purposively selected subset of participants from the Delphi exercise to ensure balanced representation across the relevant knowledge user groups and end user perspectives. Approximately 15&#x2010;25 participants will be invited, selected based on Delphi participation, demonstrated expertise, and representation across disciplines, with balanced representation across knowledge users. The meeting will focus on finalizing the AITDI checklist as a transparency assessment instrument, including discussion of the rationale, conceptual necessity, and transparency value of each candidate item, as well as development of a conceptual schema to visualize AI use across the SR workflow. Decisions will be informed by the Delphi results and relevant evidence summaries, with structured discussion and voting used to resolve disagreements regarding the content, structure, and presentation of the AITDI items. The meeting will also define next steps related to finalizing the index, including drafting responsibilities, authorship, and a knowledge translation strategy for dissemination of the AITDI.</p></sec><sec id="s2-11-5"><title>Phase 4: Drafting, Piloting, and Reliability Assessment</title><p>The final draft AITDI statement and an accompanying explanation and elaboration (E&#x0026;E) document will be prepared. The AITDI statement will outline the scope, development process, and final checklist. The E&#x0026;E document will provide a detailed rationale for each checklist item, with illustrative examples from the literature and empirical justification for inclusion. 
The checklist and E&#x0026;E document will be pilot-tested using a purposive sample of 20 AI-assisted systematic reviews identified in Aim 1. Two independent raters will apply the AITDI to these reviews to assess usability, clarity, and interrater agreement. In addition, raters will provide brief structured qualitative feedback (eg, short open-ended comments) on item interpretability, feasibility of application, and perceived burden. Quantitative agreement metrics and qualitative feedback will be synthesized to inform final refinements to the AITDI [<xref ref-type="bibr" rid="ref28">28</xref>]. Insights from Aim 1 regarding current reporting gaps and patterns of AI use, along with quantitative reliability metrics and qualitative feedback from Phase 4 pilot testing, will inform final refinements to the AITDI.</p></sec><sec id="s2-11-6"><title>Phase 5: Dissemination and Updating Strategy</title><p>We will submit the AITDI statement and E&#x0026;E document for peer-reviewed publication. We will also make the AITDI checklist, scoring manual, and example applications publicly available on an open-access platform, such as Open Science Framework (OSF), and seek inclusion of the AITDI in the EQUATOR Network&#x2019;s database of reporting guidelines for AI in SRs. A plan for updating the AITDI will be developed, with regular reviews scheduled to ensure that the index remains relevant as AI tools and disclosure standards evolve.</p></sec></sec><sec id="s2-12"><title>Aim 3: Reproducibility of AI-Generated Outputs</title><sec id="s2-12-1"><title>Study Overview</title><p>Aim 3 evaluates the reproducibility of AI performance across (1) repeated runs of the same model, (2) different AI models, and (3) AI compared with human reviewers. Unlike Aim 1, which evaluates completed SRs, Aim 3 focuses on task-level reproducibility, treating each SR task as a rating exercise analogous to human interrater reliability studies. 
Task sets will be derived from SRs included in Aim 1 to ensure diversity in review types, clinical domains, and methodological quality, and to enable construction of high-quality human consensus reference standards. Aim 3 is intentionally restricted to task types for which reproducibility can be meaningfully operationalized. Specifically, included tasks must have (1) discrete, well-defined inputs, (2) a constructible human consensus reference standard, and (3) evaluable agreement using established reliability metrics analogous to interrater reliability. Screening decisions, risk-of-bias assessments, data extraction, and evidence summarization meet these criteria and, therefore, allow direct comparison across repeated runs, AI models, and human reviewers. Other AI-assisted activities (eg, search strategy generation or narrative drafting) are conceptually important but do not satisfy these conditions in a way that supports formal reproducibility testing; accordingly, they are not examined through reproducibility metrics in Aim 3.</p></sec><sec id="s2-12-2"><title>Selection of SRs for Task Set Construction</title><p>A random, stratified subset of 20&#x2010;30 SRs from Aim 1 (stratified by clinical domain and AI-assisted vs traditional status) will be used to generate task sets. <xref ref-type="fig" rid="figure5">Figure 5</xref> illustrates the workflow for constructing reproducibility task sets and conducting parallel human and AI rating pathways. For all task sets, human raters will undergo standardized training on task-specific instructions and instruments prior to formal data collection. A calibration exercise will be conducted for each task type using a small subset of records or studies. 
During formal task execution, disagreements between raters will be resolved by consensus or adjudication by a third reviewer, as appropriate, to generate the human reference standard used for comparison with AI outputs.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Workflow for reproducibility task set construction and parallel human and AI rating pathways. AI: artificial intelligence; PICO: population, intervention, comparator, and outcome; SR: systematic review.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e90588_fig05.png"/></fig></sec></sec><sec id="s2-13"><title>Task Types</title><p>The following task types will be evaluated to assess reproducibility across AI-assisted systematic review workflows:</p><list list-type="order"><list-item><p>Screening decisions: Each task set will include 200&#x2010;400 titles/abstracts per SR, constructed by combining all included studies from the SR with additional records retrieved by rerunning the authors&#x2019; search. Two independent reviewers will apply the SR&#x2019;s eligibility criteria to all records, with disagreements resolved by consensus to form the human reference standard. AI models (eg, GPT-based [OpenAI], Claude [Anthropic PBC], and Llama [Meta Platforms]) will screen the same task sets using standardized prompt templates and standardized generation parameters (see &#x201C;AI Model Selection, Prompt Standardization, and Configuration&#x201D;). 
Each model will be run 5 times per task set to capture nondeterministic variability in generative outputs while maintaining feasibility across tasks and models; prior evaluations of LLM-assisted citation screening demonstrate measurable run-to-run variability, supporting the use of repeated executions when assessing reproducibility and agreement [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>].</p></list-item><list-item><p>Risk-of-bias assessments: for each SR, 20&#x2010;40 included studies will be sampled. Two blinded human raters will conduct domain-level risk-of-bias assessments using the same instrument specified in the original SR (eg, RoB 2 for randomized trials and ROBINS-I for nonrandomized studies, as applicable). AI models will generate domain-level risk-of-bias ratings with required justifications across repeated runs using standardized prompts aligned to the same instruments and response options used by human raters (see &#x201C;AI Model Selection, Prompt Standardization, and Configuration&#x201D;).</p></list-item><list-item><p>Data extraction: for each SR, a purposive sample of 5&#x2010;10 included studies will be selected to reflect variation in study design, outcome type (eg, binary, continuous, or time-to-event), and reporting complexity (eg, clearly reported vs ambiguous data). This range is sufficient to evaluate the reproducibility of data extraction across key quantitative variables while maintaining feasibility, given that the objective is reproducibility assessment rather than exhaustive extraction. Human extraction will occur in duplicate, while AI extraction will be repeated across models and runs using the same standardized extraction prompts and output schema.</p></list-item><list-item><p>Summary and conclusion generation: task sets will include PICO elements and the human reference quantitative results from Task 3. Two human reviewers will independently generate narrative summaries. 
AI models will generate summaries using standardized prompt templates and standardized generation parameters, and responses will be coded for correctness in direction of effect, magnitude or interpretation, and certainty language.</p></list-item></list></sec><sec id="s2-14"><title>AI Model Selection, Prompt Standardization, and Configuration</title><p>Selection of AI models for Aim 3 will follow transparent criteria rather than a fixed list of systems to ensure that the study reflects the range of contemporary LLMs available at the time of evaluation. To support reproducibility across repeated runs and across models, standardized prompt templates and output schemas will be developed for each task type before formal evaluation. Final prompt templates, task instructions, and output schemas will be archived on the project&#x2019;s OSF repository to support reproducibility. Models will be chosen to represent distinct classes of contemporary LLMs relevant to SR workflows (eg, closed-source commercial, independently developed commercial, and open- or partially open-source systems). Eligible models must be externally available (ie, not bespoke or internally developed), capable of processing the required task inputs, and permit documentation of generation settings sufficient to support reproducibility assessment.</p><p>For each task type, we will develop a prompt template before formal testing, which will be standardized in structure, instructions, and requested output format, with only task-specific fields varying across task sets (eg, eligibility criteria, risk-of-bias instrument, extraction fields, or PICO elements). Prompts will mirror the information available to human reviewers and will request only final structured decisions (eg, classification or extraction outputs) without generating chain-of-thought reasoning. 
Where model-specific formatting adjustments are required (eg, differences in system prompt syntax or input formatting), these adaptations will be limited to formatting changes and will not modify the substantive task instructions or requested outputs.</p><p>All models will receive the same source materials within each task set, presented in the same order and format as far as platform constraints allow. For both repeated-run and cross-model analyses, the same prompt template, task instructions, target output schema, and generation settings will be applied without modification wherever technically feasible. Prompt templates will be pilot tested on a small subset of task sets before formal evaluation to optimize clarity and confirm feasibility; no further substantive prompt changes will be made once formal data collection begins. Any unavoidable deviations from the prespecified prompt templates will be reported.</p><p>Model names, versions, access conditions, dates of use, and generation settings (eg, temperature or equivalent stochasticity controls, output-length limits, and other available stability parameters) will be documented before analysis and applied consistently across task sets. No additional pretraining, fine-tuning, or model adaptation will be performed as part of this study; all LLMs will be used in their externally available configurations. 
Generation parameters (eg, temperature or equivalent controls) will be fixed across repeated runs and across models where technically feasible to support reproducibility.</p></sec><sec id="s2-15"><title>Reproducibility Metrics</title><p>Reproducibility will be evaluated using the following complementary metrics and comparative analyses:</p><list list-type="bullet"><list-item><p>Reproducibility Metrics</p></list-item><list-item><p>AI-AI reproducibility: agreement will be assessed across repeated runs of the same model and across different models using Cohen &#x03BA;, Fleiss &#x03BA;, or intraclass correlation coefficient (ICC), as appropriate.</p></list-item><list-item><p>AI-human agreement: human consensus will serve as the reference standard for all tasks. Agreement, sensitivity, specificity, and directional concordance will be calculated using the same reliability metrics applied to human-human comparisons (eg, Cohen &#x03BA;, weighted &#x03BA;, ICC) and interpreted relative to task-specific human reliability ceilings.</p></list-item><list-item><p>Human-human reliability: two independent raters per task will be assessed using the same interrater reliability metrics (eg, Cohen &#x03BA;, weighted &#x03BA;, or ICC, as appropriate) to establish the reliability ceilings for each task.</p></list-item><list-item><p>Exploratory comparisons: additional analyses will assess whether reproducibility differs for task sets derived from AI-assisted versus traditional systematic reviews and whether variations in prompting structure or model parameters influence reproducibility. 
These analyses will include comparisons of repeated runs conducted at low temperature versus default generation settings, where supported by the model.</p></list-item></list><p>For selected tasks that generate free-text outputs (eg, narrative summaries, risk-of-bias justifications, or conclusion statements), we will conduct a secondary, exploratory semantic concordance analysis using an independent LLM as an automated evaluator (&#x201C;LLM-as-a-judge&#x201D;) [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. This component is intended only to assess whether paired outputs are substantively similar in meaning when conventional structured agreement metrics are insufficient, and it will not replace the primary reproducibility analyses based on &#x03BA;- or ICC-based reproducibility estimates.</p><p>The judge model will evaluate paired outputs using a prespecified categorical rubric (eg, semantically equivalent, partially equivalent, or discordant). Judge prompts will be standardized, and the judge model will be instructed to assess semantic concordance rather than stylistic similarity. Where feasible, outputs will be deidentified and presented in random order to reduce bias related to source attribution (eg, model identity or human vs AI origin). We will document the judge model, model version, access date, and evaluation prompt and interpret findings in light of known limitations of LLM-as-a-judge approaches [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>As the LLM-as-a-judge approach may introduce additional model-dependent biases, this analysis will be treated as exploratory and will be benchmarked against independent human assessment of a random 10%&#x2010;20% sample of paired outputs, consistent with prior automation-assisted validation approaches in SR research [<xref ref-type="bibr" rid="ref33">33</xref>]. 
Two human reviewers will apply the same semantic concordance rubric, with disagreements resolved by discussion or third-reviewer adjudication. Human rating will be used to contextualize agreement estimates from the judge model, and any material discrepancies between human and LLM-derived semantic judgments will be reported. Results from the LLM-as-a-judge analysis will be presented descriptively and interpreted cautiously.</p></sec><sec id="s2-16"><title>Aim 4: Qualitative Component</title><sec id="s2-16-1"><title>Qualitative Study Design</title><p>We will conduct a qualitative study using interpretive description, situated within a constructivist paradigm, to explore how diverse knowledge users conceptualize rigor, transparency, and accountability in AI-assisted SRs. A constructivist approach is appropriate given the absence of a single objective standard for acceptable AI use in SR and the expectation that norms of rigor and trustworthiness are socially constructed and context-dependent [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. 
This aim complements the quantitative and experimental components (Aims 1&#x2010;3) by examining why gaps in reporting and reproducibility arise and what conditions knowledge users view as acceptable, unsafe, or in need of governance.</p></sec><sec id="s2-16-2"><title>Participants and Sampling</title><p>We will use purposive, maximum-variation sampling to recruit approximately 25&#x2010;30 participants across six knowledge user groups, including (1) SR authors and methodologists (including faculty researchers, graduate students, and research-active medical trainees who conduct, contribute to, peer-review, or critically use SRs, even if they do not primarily publish SRs), (2) journal editors and peer reviewers, (3) guideline developers and health technology assessment committee members, (4) clinicians (including practicing physicians and clinically focused trainees, such as residents and fellows) who use SRs in practice, (5) policymakers, funders, or organizational knowledge users, and (6) patient partners involved in research, guideline processes, or advisory roles. Eligibility criteria include age &#x2265;18 years, English proficiency, self-reported familiarity with SRs (eg, through authorship, peer review, guideline development, clinical use, or research training), and willingness to participate in a 45&#x2010;60-minute interview. Individuals recruited through a direct clinician-patient relationship with study investigators will be excluded. Sampling will continue until thematic saturation is reached within each knowledge user group, defined as the point at which no new themes emerge [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. We will target 5&#x2010;8 participants per knowledge user group (minimum 5 where feasible), with iterative monitoring of thematic sufficiency within and across groups [<xref ref-type="bibr" rid="ref37">37</xref>]. 
Sample sizes may be adjusted pragmatically to ensure diversity of perspectives.</p></sec><sec id="s2-16-3"><title>Recruitment and Consent</title><p>Participants will be identified through professional networks (eg, Cochrane and SR methods groups), author lists from Aims 1 and 3, editorial boards, guideline and health technology assessment committees, institutional knowledge user networks, and patient engagement organizations. Snowball sampling will be used to identify additional eligible participants. Interested individuals will receive an information sheet and will provide verbal or electronic informed consent prior to the interview. Consent will emphasize participants&#x2019; rights to withdraw at any time and that their responses will remain confidential. Participants will be offered a modest honorarium in recognition of their time and in accordance with institutional policies.</p></sec></sec><sec id="s2-17"><title>Data Collection</title><sec id="s2-17-1"><title>Overview</title><p>Data will be collected through semi-structured interviews (approximately 45&#x2010;60 minutes) conducted via secure videoconferencing (eg, Zoom [Zoom Video Communications Inc] for health care) or telephone. The interview guide will be informed by findings from Aims 1&#x2010;3 and will explore the following: (1) participants&#x2019; experience with SR and/or AI tools; (2) perceived benefits and risks of AI-assisted SRs across key workflow stages (searching, screening, extraction, appraisal, and summarization); (3) expectations for AI disclosure, human oversight, and accountability; and (4) conditions under which AI-assisted SRs would be considered sufficiently rigorous and trustworthy for clinical, policy, or patient-facing use. Interviews will be audio-recorded, professionally transcribed, and deidentified. 
The interview guide will be pilot-tested with 1&#x2010;2 participants and iteratively refined based on feedback, and data collection will continue until thematic sufficiency is achieved across all knowledge-user groups. The full interview guide is provided in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p></sec><sec id="s2-17-2"><title>Data Analysis and Rigor</title><p>We will use reflexive thematic analysis (Braun and Clarke [<xref ref-type="bibr" rid="ref38">38</xref>]) to analyze interview transcripts, supported by qualitative data analysis software (eg, NVivo [Lumivero]). The analytic process will include: (1) familiarization with the data, (2) initial coding, (3) development and refinement of a coding framework, and (4) iterative theme generation and review. Two investigators will independently code early transcripts to calibrate coding practices before proceeding to more flexible single-coding with regular analytic meetings. Rigor strategies will include reflexive journaling, peer debriefing within the investigator team, and light-touch member checking (eg, sharing a brief thematic summary with a small subset of participants to confirm resonance). 
Reporting will follow the Consolidated Criteria for Reporting Qualitative Research (COREQ) guidance [<xref ref-type="bibr" rid="ref39">39</xref>] to ensure rigor in qualitative research reporting.</p></sec><sec id="s2-17-3"><title>Integration With Other Aims</title><p>Qualitative findings will be used to (1) interpret quantitative differences in methodological quality and transparency identified in Aim 1, (2) contextualize reproducibility patterns observed in Aim 3, and (3) refine and prioritize AITDI domains and items in Aim 2, particularly around expectations for AI disclosure, human oversight, and acceptable versus unacceptable AI uses in SR workflows.</p></sec></sec><sec id="s2-18"><title>Ethical Considerations</title><p>Aims 1 and 2 (comparative quality assessment of published systematic reviews and development of the AI transparency and disclosure index) and Aim 3 (reproducibility testing using task sets derived from published reviews) involve secondary analysis of publicly available, deidentified data and simulated rating exercises using published materials. In accordance with institutional policy, these components qualify for exemption from research ethics board review.</p><p>Aim 4 (qualitative interviews with knowledge users) has been submitted to the Sunnybrook Research Ethics Board (approval pending at the time of manuscript revision; study ID: 7101). All interview participants will provide informed consent prior to participation. For the qualitative component, participants will receive an information sheet outlining study procedures, risks, and benefits, and consent will be obtained electronically or verbally prior to data collection. Participation is voluntary and participants may withdraw at any time without consequence. Audio-recorded interviews will be professionally transcribed and deidentified prior to analysis. 
Transcripts and analytic files will be stored on secure, password-protected institutional servers accessible only to authorized study personnel. Only anonymized or aggregated qualitative findings will be publicly reported. Participants in Aim 4 will receive an honorarium in recognition of their time and contributions.</p></sec><sec id="s2-19"><title>Patient Involvement and Dissemination</title><p>Patient and clinician partners will contribute to protocol refinement, interpretation of qualitative and quantitative findings, and development of accessible dissemination materials. Their involvement will be reported using Guidance for Reporting Involvement of Patients and the Public version 2 (GRIPP2) [<xref ref-type="bibr" rid="ref40">40</xref>]. Honoraria will be offered when feasible and appropriate. Study findings will be disseminated through peer-reviewed publications, conference presentations, and public repositories. An open-access AI transparency checklist for SR authors, editors, and guideline developers will be developed based on study results and made available through the project&#x2019;s OSF page. This strategy aims to support transparent and responsible use of AI in SR.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>As of December 2025, this study has been preregistered on the OSF (DOI: 10.17605/OSF.IO/Q5JRW), the search strategy has been finalized, and title/abstract screening has begun. Full-text screening and data extraction are planned for Feb-April 2026, followed by refinement and application of the AITDI and reproducibility testing from May to October 2026. Qualitative interviews are anticipated from October 2026 to February 2027. Final analyses and synthesis of quantitative and qualitative findings are expected to be completed by April 2027, with dissemination planned for mid-2027.</p></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><p>The integration of AI into SR workflows has accelerated rapidly. 
However, methodological and reporting frameworks used to appraise review quality have not kept pace [<xref ref-type="bibr" rid="ref2">2</xref>]. Existing evaluations have largely focused on isolated tasks (eg, screening) or narrow clinical domains, with limited attention to the overall methodological rigor, reporting completeness, transparency, and reproducibility of AI-assisted SRs [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. In addition, AI use is often poorly documented, making it difficult to assess where and how tools were applied, what safeguards were in place, and whether outputs are reproducible [<xref ref-type="bibr" rid="ref33">33</xref>]. This protocol evaluates the methodological quality, transparency, and reproducibility of AI-assisted SRs and explores knowledge-user expectations for acceptable use and disclosure.</p><p>This study has several methodological strengths. First, the matched comparative design in Aim 1 controls for publication year, topic area, review type, and meta-analysis status, reducing confounding from field differences and time-based trends. Second, dual independent scoring with validated appraisal (AMSTAR 2, ROBIS) and reporting tools (PRISMA 2020), combined with a structured AI transparency rubric (AITDI), provides a rigorous and reliable framework for evaluating both methodological quality and the completeness of AI-related reporting. Third, the reproducibility component (Aim 3) moves beyond single-run performance evaluations by examining agreement across repeated runs of the same model, across different models, and between AI and human reviewers, while benchmarking AI-human agreement against human-human reliability. 
Finally, the qualitative component (Aim 4), grounded in interpretive description within a constructivist paradigm, ensures that the quantitative and experimental findings are interpreted in light of how authors, editors, guideline developers, clinicians, policymakers, and patient partners conceptualize acceptable versus unsafe AI use, disclosure, and accountability in SRs.</p><p>There are also important limitations. Identification of AI-assisted reviews will depend on explicit reporting in manuscripts, supplementary materials, acknowledgements, or AI disclosure statements. As a result, underreporting may lead to misclassification and likely underestimates the true prevalence and depth of AI use in review workflows. Some reviews classified as traditional comparators may therefore have involved unreported AI use. Because currently available AI-detection tools primarily assess stylistic features rather than workflow-level methodological use, they are not sufficiently reliable for identifying undisclosed AI assistance in published reviews. If such misclassification is nondifferential with respect to methodological quality, it would be expected to bias effect estimates toward the null and reduce our ability to detect true between-group differences. However, disclosure of AI use may also correlate with other characteristics, such as author transparency, journal reporting standards, or methodological rigor. Accordingly, the comparison in Aim 1 should be interpreted as primarily reflecting reviews with reported AI use versus reviews without reported AI use, rather than a definitive causal contrast between truly AI-assisted and truly non&#x2013;AI-assisted reviews. Furthermore, quality assessment focuses on methodological and reporting standards rather than verifying numerical accuracy of extracted data or meta-analytic calculations, and unmeasured differences between AI-assisted and traditional reviews may remain despite matching and adjustment. 
Restriction to English-language health and biomedical journals may limit generalizability. In Aim 3, reproducibility estimates are based on a subset of contemporary models and tasks; as AI tools evolve, absolute performance may change. Residual confounding from unmeasured factors (eg, author team expertise or internal review processes) may persist. Estimates of the association between AI assistance and methodological quality will be interpreted cautiously. Incomplete reporting of prompting strategies or model configuration may further limit characterization of AI use. Moreover, in Aim 3, observed reproducibility may be influenced not only by underlying model behavior but also by prompt design and platform-specific implementation constraints; although prompts and generation settings will be standardized and documented, some residual prompt-related variability may remain. Finally, given that AI tools and reporting norms are evolving rapidly, findings from this 2023&#x2010;2025 sampling frame should be interpreted as reflecting an early phase of LLM integration into SR workflows.</p><p>Analogous to psychometric properties used to evaluate clinical tests (eg, sensitivity, specificity, and reliability), reproducibility estimates (eg, within-model stability across repeated runs, between-model agreement, and benchmarking against human-human variability) could represent a candidate set of performance characteristics that AI tools should report when used in SRs. While defining acceptable thresholds is beyond the scope of the present study, the empirical framework applied in Aim 3 illustrates how such metrics can be generated and interpreted and may inform future standards, journal policies, or guidance on acceptable AI use in SRs.</p><p>Despite these constraints, this study is well positioned to make several contributions. 
By providing empirical estimates of methodological and reporting quality for AI-assisted versus traditional SRs, it will clarify whether this AI integration is neutral or detrimental to review rigor. The AITDI is expected to offer a practical, structured instrument for documenting and appraising AI use, which can inform journal policies, editorial expectations, and future AI-specific extensions of existing tools (eg, PRISMA-LLM [PRISMA&#x2013;large language model] and A Measurement Tool to Assess Systematic Reviews-large language model [AMSTAR-LLM]). Reproducibility analyses will help delineate when AI tools can be treated as reasonably stable co-reviewers and when tighter human oversight or additional safeguards are warranted. The qualitative findings will complement these results by illuminating how different knowledge users define rigor, transparency, and trustworthiness in AI-assisted SRs and by identifying perceived &#x201C;red lines,&#x201D; acceptable use cases, and governance priorities.</p><p>This work is intended as a foundational step toward transparent, accountable, and methodologically robust use of AI in SR. As AI capabilities expand and disclosure expectations evolve, the framework, transparency index, reproducibility assessments, and knowledge user-informed insights developed here can be updated and reapplied to ensure that innovations in SR workflows are matched by appropriate standards for quality, transparency, and oversight. Future applications could include repeat evaluations in later periods (eg, 2027 and beyond). 
Extensions may also examine whether AI-assisted reviews differ from traditional reviews in the framing of their conclusions, building on prior meta-epidemiologic work evaluating conclusion optimism and interpretive &#x201C;spin&#x201D; in SRs [<xref ref-type="bibr" rid="ref41">41</xref>].</p></sec></body><back><ack><p>MJ is supported by the Vanier Canada Graduate Scholarship, the Fernand Labrie Fellowship from the Canadian Society of Endocrinology and Metabolism, the Hold&#x2019;em for Life Oncology Fellowship, and the Clinician Scientist Training Program at the University of Toronto. The funding bodies had no role in the conception, writing, or decision to submit this protocol.</p><p>During the preparation of this manuscript, the authors used ChatGPT (GPT-5.1, OpenAI) solely to assist with grammar, spelling, and phrasing. No AI tools were used to generate conceptual content, interpretations, or arguments. All content was reviewed, edited, and verified by the authors, who take full responsibility for the accuracy, integrity, and originality of the manuscript.</p></ack><notes><sec><title>Funding</title><p>This research did not receive any funding in the public, commercial, or not-for-profit sectors.</p></sec><sec><title>Data Availability</title><p>Data sharing is not applicable to this article as no datasets were generated or analyzed during this study.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: MJ, LL, AE</p><p>Methodology: MJ, SES, EW, RS, CY, LGC, CD</p><p>Project administration: MJ</p><p>Investigation: MJ</p><p>Formal analysis: MJ, RS</p><p>Resources: EW</p><p>Software: CD</p><p>Supervision: LL, AE</p><p>Visualization: MJ, MM</p><p>Writing &#x2013; original draft: MJ</p><p>Writing &#x2013; review &#x0026; editing: MJ, MM, SES, EW, RS, CY, LGC, CD, LL, AE</p><p>All authors reviewed and approved the final manuscript.</p></fn><fn fn-type="conflict"><p>None 
declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AITDI</term><def><p>AI Transparency and Disclosure Index</p></def></def-item><def-item><term id="abb3">AMSTAR 2</term><def><p>A Measurement Tool to Assess Systematic Reviews version 2</p></def></def-item><def-item><term id="abb4">AMSTAR-LLM</term><def><p>A Measurement Tool to Assess Systematic Reviews-Large Language Model</p></def></def-item><def-item><term id="abb5">CONSORT-AI</term><def><p>Consolidated Standards of Reporting Trials-artificial intelligence extension</p></def></def-item><def-item><term id="abb6">COREQ</term><def><p>Consolidated Criteria for Reporting Qualitative Research</p></def></def-item><def-item><term id="abb7">E&#x0026;E</term><def><p>explanation and elaboration</p></def></def-item><def-item><term id="abb8">EQUATOR</term><def><p>Enhancing the Quality and Transparency of Health Research</p></def></def-item><def-item><term id="abb9">GRIPP2</term><def><p>Guidance for Reporting Involvement of Patients and the Public version 2</p></def></def-item><def-item><term id="abb10">ICC</term><def><p>intraclass correlation coefficient</p></def></def-item><def-item><term id="abb11">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb12">MeSH</term><def><p>Medical Subject Headings</p></def></def-item><def-item><term id="abb13">OSF</term><def><p>Open Science Framework</p></def></def-item><def-item><term id="abb14">PICO</term><def><p>population, intervention, comparator, and outcome</p></def></def-item><def-item><term id="abb15">PRESS</term><def><p>Peer Review of Electronic Search Strategies</p></def></def-item><def-item><term id="abb16">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item><def-item><term id="abb17">PRISMA-LLM</term><def><p>Preferred Reporting Items for Systematic Reviews 
and Meta-Analyses for Large Language Model</p></def></def-item><def-item><term id="abb18">PRISMA-S</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses-Search extension</p></def></def-item><def-item><term id="abb19">PRISMA-trAIce</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses-Transparent Reporting of Artificial Intelligence in Comprehensive Evidence Synthesis</p></def></def-item><def-item><term id="abb20">ROB2</term><def><p>Risk of Bias 2</p></def></def-item><def-item><term id="abb21">ROBINS-I</term><def><p>Risk of Bias in Nonrandomized Studies of Interventions</p></def></def-item><def-item><term id="abb22">ROBIS</term><def><p>Risk of Bias in Systematic Reviews</p></def></def-item><def-item><term id="abb23">SPIRIT-AI</term><def><p>Standard Protocol Items: Recommendations for Interventional Trials-artificial intelligence extension</p></def></def-item><def-item><term id="abb24">SR</term><def><p>systematic review.</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><article-title>Systematic review methodology: conducting high-quality reviews and understanding their significance in evidence-based practice</article-title><source>J Int Med Graduates</source><year>2023</year><volume>2</volume><issue>1</issue><pub-id pub-id-type="doi">10.56570/jimgs.v2i1.76</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Petri</surname><given-names>D</given-names> </name></person-group><article-title>Evidence synthesis 2.0: how artificial intelligence is making systematic reviews more efficient</article-title><source>Recenti Prog Med</source><year>2023</year><month>06</month><volume>114</volume><issue>6</issue><fpage>359</fpage><lpage>361</lpage><pub-id pub-id-type="doi">10.1701/4042.40229</pub-id><pub-id 
pub-id-type="medline">37229683</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Meng</surname><given-names>XL</given-names> </name></person-group><article-title>A comparative study of screening performance between abstrackr and GPT models: systematic review and contextual analysis</article-title><source>BMC Med Inform Decis Mak</source><year>2025</year><month>08</month><day>7</day><volume>25</volume><issue>1</issue><fpage>293</fpage><pub-id pub-id-type="doi">10.1186/s12911-025-03138-w</pub-id><pub-id pub-id-type="medline">40775694</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ouzzani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hammady</surname><given-names>H</given-names> </name><name name-style="western"><surname>Fedorowicz</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Elmagarmid</surname><given-names>A</given-names> </name></person-group><article-title>Rayyan-a web and mobile app for systematic reviews</article-title><source>Syst Rev</source><year>2016</year><month>12</month><day>5</day><volume>5</volume><issue>1</issue><fpage>210</fpage><pub-id pub-id-type="doi">10.1186/s13643-016-0384-4</pub-id><pub-id pub-id-type="medline">27919275</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gartlehner</surname><given-names>G</given-names> </name><name 
name-style="western"><surname>Wagner</surname><given-names>G</given-names> </name><name name-style="western"><surname>Lux</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Assessing the accuracy of machine-assisted abstract screening with DistillerAI: a user study</article-title><source>Syst Rev</source><year>2019</year><month>11</month><day>15</day><volume>8</volume><issue>1</issue><fpage>277</fpage><pub-id pub-id-type="doi">10.1186/s13643-019-1221-3</pub-id><pub-id pub-id-type="medline">31727159</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Maurya</surname><given-names>I</given-names> </name><name name-style="western"><surname>Lohiya</surname><given-names>A</given-names> </name><name name-style="western"><surname>Maurya</surname><given-names>RG</given-names> </name><name name-style="western"><surname>Garg</surname><given-names>R</given-names> </name></person-group><article-title>Automated tools in systematic reviews: current trends</article-title><source>Indian J Anaesth</source><year>2025</year><month>12</month><volume>69</volume><issue>12</issue><fpage>1416</fpage><lpage>1421</lpage><pub-id pub-id-type="doi">10.4103/ija.ija_1008_25</pub-id><pub-id pub-id-type="medline">41395143</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tercero-Hidalgo</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>KS</given-names> </name><name name-style="western"><surname>Bueno-Cavanillas</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Artificial intelligence in COVID-19 evidence syntheses was underutilized, but impactful: a methodological study</article-title><source>J Clin 
Epidemiol</source><year>2022</year><month>08</month><volume>148</volume><fpage>124</fpage><lpage>134</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2022.04.027</pub-id><pub-id pub-id-type="medline">35513213</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yao</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Lopez</surname><given-names>RD</given-names> </name><name name-style="western"><surname>Rizk</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Aggarwal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Namdari</surname><given-names>S</given-names> </name></person-group><article-title>Evaluation of a popular large language model in orthopedic literature review: comparison to previously published reviews</article-title><source>Arch Bone Jt Surg</source><year>2025</year><volume>13</volume><issue>8</issue><fpage>460</fpage><lpage>469</lpage><pub-id pub-id-type="doi">10.22038/ABJS.2025.84896.3874</pub-id><pub-id pub-id-type="medline">40951516</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Woelfle</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hirt</surname><given-names>J</given-names> </name><name name-style="western"><surname>Janiaud</surname><given-names>P</given-names> </name><name name-style="western"><surname>Kappos</surname><given-names>L</given-names> </name><name name-style="western"><surname>Ioannidis</surname><given-names>JPA</given-names> </name><name name-style="western"><surname>Hemkens</surname><given-names>LG</given-names> </name></person-group><article-title>Benchmarking human-AI collaboration for common evidence appraisal tools</article-title><source>J Clin 
Epidemiol</source><year>2024</year><month>11</month><volume>175</volume><fpage>111533</fpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2024.111533</pub-id><pub-id pub-id-type="medline">39277058</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hamel</surname><given-names>C</given-names> </name><name name-style="western"><surname>Hersi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kelly</surname><given-names>SE</given-names> </name><etal/></person-group><article-title>Guidance for using artificial intelligence for title and abstract screening while conducting knowledge syntheses</article-title><source>BMC Med Res Methodol</source><year>2021</year><month>12</month><day>20</day><volume>21</volume><issue>1</issue><fpage>285</fpage><pub-id pub-id-type="doi">10.1186/s12874-021-01451-2</pub-id><pub-id pub-id-type="medline">34930132</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lunny</surname><given-names>C</given-names> </name><name name-style="western"><surname>Jain</surname><given-names>N</given-names> </name><name name-style="western"><surname>Nazari</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Exploring the methodological quality and risk of bias in 200 systematic reviews: a comparative study of ROBIS and AMSTAR-2 tools</article-title><source>Res Synth Methods</source><year>2026</year><month>01</month><volume>17</volume><issue>1</issue><fpage>63</fpage><lpage>92</lpage><pub-id pub-id-type="doi">10.1017/rsm.2025.10032</pub-id><pub-id pub-id-type="medline">41626887</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Whiting</surname><given-names>P</given-names> </name><name name-style="western"><surname>Savovi&#x0107;</surname><given-names>J</given-names> </name><name name-style="western"><surname>Higgins</surname><given-names>JPT</given-names> </name><etal/></person-group><article-title>ROBIS: a new tool to assess risk of bias in systematic reviews was developed</article-title><source>J Clin Epidemiol</source><year>2016</year><month>01</month><volume>69</volume><fpage>225</fpage><lpage>234</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2015.06.005</pub-id><pub-id pub-id-type="medline">26092286</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Page</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>McKenzie</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><etal/></person-group><article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title><source>BMJ</source><year>2021</year><month>03</month><day>29</day><volume>372</volume><fpage>n71</fpage><pub-id pub-id-type="doi">10.1136/bmj.n71</pub-id><pub-id pub-id-type="medline">33782057</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Movva</surname><given-names>R</given-names> </name><name name-style="western"><surname>Balachandar</surname><given-names>S</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>K</given-names> </name><name name-style="western"><surname>Agostini</surname><given-names>G</given-names> </name><name name-style="western"><surname>Garg</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Pierson</surname><given-names>E</given-names> </name></person-group><article-title>Topics, authors, and institutions in large language model research: trends from 17K arxiv papers</article-title><access-date>2026-05-06</access-date><conf-name>Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics</conf-name><conf-date>Jun 16-21, 2024</conf-date><comment><ext-link ext-link-type="uri" xlink:href="https://aclanthology.org/2024.naacl-long">https://aclanthology.org/2024.naacl-long</ext-link></comment><pub-id pub-id-type="doi">10.18653/v1/2024.naacl-long.67</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Schulz</surname><given-names>KF</given-names> </name><name name-style="western"><surname>Simera</surname><given-names>I</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name></person-group><article-title>Guidance for developers of health research reporting guidelines</article-title><source>PLoS Med</source><year>2010</year><month>02</month><day>16</day><volume>7</volume><issue>2</issue><fpage>e1000217</fpage><pub-id pub-id-type="doi">10.1371/journal.pmed.1000217</pub-id><pub-id pub-id-type="medline">20169112</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hennessy</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bilker</surname><given-names>WB</given-names> </name><name name-style="western"><surname>Berlin</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Strom</surname><given-names>BL</given-names> 
</name></person-group><article-title>Factors influencing the optimal control-to-case ratio in matched case-control studies</article-title><source>Am J Epidemiol</source><year>1999</year><month>01</month><day>15</day><volume>149</volume><issue>2</issue><fpage>195</fpage><lpage>197</lpage><pub-id pub-id-type="doi">10.1093/oxfordjournals.aje.a009786</pub-id><pub-id pub-id-type="medline">9921965</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shea</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Reeves</surname><given-names>BC</given-names> </name><name name-style="western"><surname>Wells</surname><given-names>G</given-names> </name><etal/></person-group><article-title>AMSTAR 2: a critical appraisal tool for systematic reviews that include randomised or non-randomised studies of healthcare interventions, or both</article-title><source>BMJ</source><year>2017</year><month>09</month><day>21</day><volume>358</volume><fpage>j4008</fpage><pub-id pub-id-type="doi">10.1136/bmj.j4008</pub-id><pub-id pub-id-type="medline">28935701</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="web"><article-title>SR / MA / HTA / ITC &#x2013; CINAHL Canada&#x2019;s Drug Agency</article-title><source>Canada&#x2019;s Drug Agency</source><access-date>2025-12-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.cda-amc.ca/">https://www.cda-amc.ca/</ext-link></comment></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Blackwood</surname><given-names>D</given-names> </name></person-group><article-title>Peer review of electronic search strategies (PRESS): &#x201C;Can you check my systematic review search 
strategy?&#x201D;</article-title><year>2015</year><access-date>2026-05-06</access-date><publisher-name>HLA News</publisher-name><fpage>9</fpage><lpage>10</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://search.informit.org/doi/abs/10.3316/informit.314342355541565">https://search.informit.org/doi/abs/10.3316/informit.314342355541565</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ge</surname><given-names>L</given-names> </name><name name-style="western"><surname>Agrawal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Singer</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Leveraging artificial intelligence to enhance systematic reviews in health research: advanced tools and challenges</article-title><source>Syst Rev</source><year>2024</year><volume>13</volume><issue>1</issue><pub-id pub-id-type="doi">10.1186/s13643-024-02682-2</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>Simera</surname><given-names>I</given-names> </name><name name-style="western"><surname>Hoey</surname><given-names>J</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Schulz</surname><given-names>K</given-names> </name></person-group><article-title>EQUATOR: reporting guidelines for health research</article-title><source>Open Med</source><year>2008</year><volume>2</volume><issue>2</issue><fpage>e49</fpage><lpage>50</lpage><pub-id pub-id-type="medline">21602941</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Rethlefsen</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Kirtley</surname><given-names>S</given-names> </name><name name-style="western"><surname>Waffenschmidt</surname><given-names>S</given-names> </name><etal/></person-group><article-title>PRISMA-S: an extension to the PRISMA statement for reporting literature searches in systematic reviews</article-title><source>Syst Rev</source><year>2021</year><month>01</month><day>26</day><volume>10</volume><issue>1</issue><fpage>39</fpage><pub-id pub-id-type="doi">10.1186/s13643-020-01542-z</pub-id><pub-id pub-id-type="medline">33499930</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ibrahim</surname><given-names>H</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Rivera</surname><given-names>SC</given-names> </name><etal/></person-group><article-title>Reporting guidelines for clinical trials of artificial intelligence interventions: the SPIRIT-AI and CONSORT-AI guidelines</article-title><source>Trials</source><year>2021</year><month>01</month><day>6</day><volume>22</volume><issue>1</issue><fpage>11</fpage><pub-id pub-id-type="doi">10.1186/s13063-020-04951-6</pub-id><pub-id pub-id-type="medline">33407780</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dyer</surname><given-names>M</given-names> </name></person-group><article-title>ICMJE recommendations update: what&#x2019;s new?</article-title><source>Med Writ</source><year>2024</year><month>06</month><access-date>2026-05-06</access-date><volume>33</volume><issue>2</issue><fpage>83</fpage><lpage>85</lpage><comment><ext-link 
ext-link-type="uri" xlink:href="https://journal.emwa.org/soft-skills-for-medical-writers">https://journal.emwa.org/soft-skills-for-medical-writers</ext-link></comment><pub-id pub-id-type="doi">10.56012/ugaa9986</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Luo</surname><given-names>X</given-names> </name><name name-style="western"><surname>Tham</surname><given-names>YC</given-names> </name><name name-style="western"><surname>Giuffr&#x00E8;</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Reporting guideline for the use of generative artificial intelligence tools in medical research: the GAMER Statement</article-title><source>BMJ Evid Based Med</source><year>2025</year><month>12</month><day>1</day><volume>30</volume><issue>6</issue><fpage>390</fpage><lpage>400</lpage><pub-id pub-id-type="doi">10.1136/bmjebm-2025-113825</pub-id><pub-id pub-id-type="medline">40360239</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Holst</surname><given-names>D</given-names> </name><name name-style="western"><surname>Moenck</surname><given-names>K</given-names> </name><name name-style="western"><surname>Koch</surname><given-names>J</given-names> </name><name name-style="western"><surname>Schmedemann</surname><given-names>O</given-names> </name><name name-style="western"><surname>Sch&#x00FC;ppstuhl</surname><given-names>T</given-names> </name></person-group><article-title>Transparent reporting of AI in systematic literature reviews: development of the PRISMA-trAIce checklist</article-title><source>JMIR AI</source><year>2025</year><month>12</month><day>10</day><volume>4</volume><fpage>e80247</fpage><pub-id pub-id-type="doi">10.2196/80247</pub-id><pub-id 
pub-id-type="medline">41370833</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Spranger</surname><given-names>J</given-names> </name><name name-style="western"><surname>Homberg</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sonnberger</surname><given-names>M</given-names> </name><name name-style="western"><surname>Niederberger</surname><given-names>M</given-names> </name></person-group><article-title>Reporting guidelines for Delphi techniques in health sciences: a methodological review</article-title><source>Z Evid Fortbild Qual Gesundhwes</source><year>2022</year><month>08</month><volume>172</volume><fpage>1</fpage><lpage>11</lpage><pub-id pub-id-type="doi">10.1016/j.zefq.2022.04.025</pub-id><pub-id pub-id-type="medline">35718726</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sivakumar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Pan</surname><given-names>RY</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Assessing the sustainability and scalability of a diabetes eHealth innovation: a mixed-methods study</article-title><source>BMC Health Serv Res</source><year>2023</year><month>06</month><day>14</day><volume>23</volume><issue>1</issue><fpage>630</fpage><pub-id pub-id-type="doi">10.1186/s12913-023-09618-x</pub-id><pub-id pub-id-type="medline">37316850</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oami</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Okada</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Nakada</surname><given-names>TA</given-names> </name></person-group><article-title>Optimal large language models to screen citations for systematic reviews</article-title><source>Res Synth Methods</source><year>2025</year><month>11</month><volume>16</volume><issue>6</issue><fpage>859</fpage><lpage>875</lpage><pub-id pub-id-type="doi">10.1017/rsm.2025.10014</pub-id><pub-id pub-id-type="medline">41626985</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chelli</surname><given-names>M</given-names> </name><name name-style="western"><surname>Descamps</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lavou&#x00E9;</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Hallucination rates and reference accuracy of ChatGPT and Bard for systematic reviews: comparative analysis</article-title><source>J Med Internet Res</source><year>2024</year><month>05</month><day>22</day><volume>26</volume><fpage>e53164</fpage><pub-id pub-id-type="doi">10.2196/53164</pub-id><pub-id pub-id-type="medline">38776130</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Son</surname><given-names>G</given-names> </name><name name-style="western"><surname>Ko</surname><given-names>H</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>H</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Hong</surname><given-names>S</given-names> </name></person-group><article-title>LLM-as-a-judge and reward model: what they can and cannot 
do</article-title><source>arXiv</source><comment>Preprint posted online on Sep 17, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2409.11239</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Burns</surname><given-names>M</given-names> </name><name name-style="western"><surname>Bally</surname><given-names>J</given-names> </name><name name-style="western"><surname>Burles</surname><given-names>M</given-names> </name><name name-style="western"><surname>Holtslander</surname><given-names>L</given-names> </name><name name-style="western"><surname>Peacock</surname><given-names>S</given-names> </name></person-group><article-title>Constructivist grounded theory or interpretive phenomenology? methodological choices within specific study contexts</article-title><source>Int J Qual Methods</source><year>2022</year><month>04</month><volume>21</volume><fpage>16094069221077758</fpage><pub-id pub-id-type="doi">10.1177/16094069221077758</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Dijk</surname><given-names>SHB</given-names> </name><name name-style="western"><surname>Brusse-Keizer</surname><given-names>MGJ</given-names> </name><name name-style="western"><surname>Bucs&#x00E1;n</surname><given-names>CC</given-names> </name><name name-style="western"><surname>van der Palen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Doggen</surname><given-names>CJM</given-names> </name><name name-style="western"><surname>Lenferink</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence in systematic reviews: promising when appropriately used</article-title><source>BMJ 
Open</source><year>2023</year><month>07</month><day>7</day><volume>13</volume><issue>7</issue><fpage>e072254</fpage><pub-id pub-id-type="doi">10.1136/bmjopen-2023-072254</pub-id><pub-id pub-id-type="medline">37419641</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abdullah Kamal</surname><given-names>S</given-names> </name><collab>School of Education, The University of Sheffield, Sheffield, England, United Kingdom</collab></person-group><article-title>Research paradigm and the philosophical foundations of a qualitative study</article-title><source>PEOPLE Int J Soc Sci</source><year>2019</year><volume>4</volume><issue>3</issue><fpage>1386</fpage><lpage>1394</lpage><pub-id pub-id-type="doi">10.20319/pijss.2019.43.13861394</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ahmed</surname><given-names>SK</given-names> </name></person-group><article-title>Sample size for saturation in qualitative research: debates, definitions, and strategies</article-title><source>J Med Surg Public Health</source><year>2025</year><month>04</month><volume>5</volume><fpage>100171</fpage><pub-id pub-id-type="doi">10.1016/j.glmedi.2024.100171</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Naeem</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ozuem</surname><given-names>W</given-names> </name><name name-style="western"><surname>Howell</surname><given-names>K</given-names> </name><name name-style="western"><surname>Ranfagni</surname><given-names>S</given-names> </name></person-group><article-title>Demystification and actualisation of data saturation in qualitative research through 
thematic analysis</article-title><source>Int J Qual Methods</source><year>2024</year><month>01</month><volume>23</volume><pub-id pub-id-type="doi">10.1177/16094069241229777</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Young</surname><given-names>DS</given-names> </name><name name-style="western"><surname>Casey</surname><given-names>EA</given-names> </name></person-group><article-title>An examination of the sufficiency of small qualitative samples</article-title><source>Soc Work Res</source><year>2018</year><pub-id pub-id-type="doi">10.1093/swr/svy026</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name><name name-style="western"><surname>Hayfield</surname><given-names>N</given-names> </name><name name-style="western"><surname>Davey</surname><given-names>L</given-names> </name><name name-style="western"><surname>Jenkinson</surname><given-names>E</given-names> </name></person-group><article-title>Doing reflexive thematic analysis</article-title><source>Supporting Research in Counselling and Psychotherapy: Qualitative, Quantitative, and Mixed Methods Research</source><year>2023</year><publisher-name>Springer</publisher-name><fpage>19</fpage><lpage>38</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-13942-0_2</pub-id><pub-id pub-id-type="other">978-3-031-13941-3</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Booth</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hannes</surname><given-names>K</given-names> </name><name 
name-style="western"><surname>Harden</surname><given-names>A</given-names> </name><name name-style="western"><surname>Noyes</surname><given-names>J</given-names> </name><name name-style="western"><surname>Harris</surname><given-names>J</given-names> </name><name name-style="western"><surname>Tong</surname><given-names>A</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>Schulz</surname><given-names>KF</given-names> </name><name name-style="western"><surname>Simera</surname><given-names>I</given-names> </name><name name-style="western"><surname>Wager</surname><given-names>E</given-names> </name></person-group><article-title>COREQ (consolidated criteria for reporting qualitative studies)</article-title><source>Guidelines for Reporting Health Research: A User&#x2019;s Manual</source><year>2014</year><fpage>214</fpage><lpage>226</lpage><pub-id pub-id-type="doi">10.1002/9781118715598</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Staniszewska</surname><given-names>S</given-names> </name><name name-style="western"><surname>Brett</surname><given-names>J</given-names> </name><name name-style="western"><surname>Simera</surname><given-names>I</given-names> </name><etal/></person-group><article-title>GRIPP2 reporting checklists: tools to improve reporting of patient and public involvement in research</article-title><source>BMJ</source><year>2017</year><month>08</month><day>2</day><volume>358</volume><fpage>j3453</fpage><pub-id pub-id-type="doi">10.1136/bmj.j3453</pub-id><pub-id pub-id-type="medline">28768629</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tricco</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Tetzlaff</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pham</surname><given-names>B</given-names> </name><name name-style="western"><surname>Brehaut</surname><given-names>J</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name></person-group><article-title>Non-Cochrane vs. Cochrane reviews were twice as likely to have positive conclusion statements: cross-sectional study</article-title><source>J Clin Epidemiol</source><year>2009</year><month>04</month><volume>62</volume><issue>4</issue><fpage>380</fpage><lpage>386</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2008.08.008</pub-id><pub-id pub-id-type="medline">19128940</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Summary of primary and secondary outcomes and quality assessment tools.</p><media xlink:href="resprot_v15i1e90588_app1.docx" xlink:title="DOCX File, 19 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Full search strategies for MEDLINE, Cochrane, and CINAHL (Executed December 3-5, 2025).</p><media xlink:href="resprot_v15i1e90588_app2.docx" xlink:title="DOCX File, 23 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Data extraction variables and coding guidance.</p><media xlink:href="resprot_v15i1e90588_app3.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Semistructured interview guide.</p><media xlink:href="resprot_v15i1e90588_app4.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material></app-group></back></article>