<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Res Protoc</journal-id><journal-id journal-id-type="publisher-id">ResProt</journal-id><journal-id journal-id-type="index">5</journal-id><journal-title>JMIR Research Protocols</journal-title><abbrev-journal-title>JMIR Res Protoc</abbrev-journal-title><issn pub-type="epub">1929-0748</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v15i1e79875</article-id><article-id pub-id-type="doi">10.2196/79875</article-id><article-categories><subj-group subj-group-type="heading"><subject>Protocol</subject></subj-group></article-categories><title-group><article-title>Using AI Algorithms and Machine Learning in the Analysis of a Bio-Purification Method (Therapeutic Emesis, Known as &#x201C;Vamana Karma&#x201D;): Protocol for a Mixed Methods Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Rani</surname><given-names>Pooja</given-names></name><degrees>BAMS, MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Kalra</surname><given-names>Sumit</given-names></name><degrees>B Tech, M Tech, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Singh</surname><given-names>Sachin</given-names></name><degrees>B Tech 
CE-SE</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>David</surname><given-names>Richard</given-names></name><degrees>BS (EECS)</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gupta</surname><given-names>Ashutosh Ravi</given-names></name><degrees>B Tech, M Tech</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>P V</surname><given-names>Anandaraman</given-names></name><degrees>BAMS, MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Panchakarma, All India Institute of Ayurveda</institution><addr-line>Mathura Rd, Gautampuri Awas, Sarita Vihar</addr-line><addr-line>New Delhi</addr-line><addr-line>Delhi</addr-line><country>India</country></aff><aff id="aff2"><institution>Department of Computer Science and Engineering, Indian Institute of Technology Jodhpur</institution><addr-line>Jodhpur</addr-line><addr-line>Rajasthan</addr-line><country>India</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Shaffi</surname><given-names>Shamnad Mohamed</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Pooja Rani, BAMS, MD, Department of Panchakarma, All India Institute of Ayurveda, Mathura Rd, Gautampuri Awas, Sarita Vihar, New Delhi, Delhi, 110076, India, 91 8851304239; <email>drpoojapathak369@gmail.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date 
pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>3</day><month>2</month><year>2026</year></pub-date><volume>15</volume><elocation-id>e79875</elocation-id><history><date date-type="received"><day>01</day><month>07</month><year>2025</year></date><date date-type="rev-recd"><day>18</day><month>11</month><year>2025</year></date><date date-type="accepted"><day>18</day><month>11</month><year>2025</year></date></history><copyright-statement>&#x00A9; Pooja Rani, Sumit Kalra, Sachin Singh, Richard David, Ashutosh Ravi Gupta, Anandaraman P V. Originally published in JMIR Research Protocols (<ext-link ext-link-type="uri" xlink:href="https://www.researchprotocols.org">https://www.researchprotocols.org</ext-link>), 3.2.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Research Protocols, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.researchprotocols.org">https://www.researchprotocols.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.researchprotocols.org/2026/1/e79875"/><abstract><sec><title>Background</title><p>Therapeutic emesis (TE), known as <italic>vamana karma</italic>, is a classical method of detoxification performed to eliminate vitiated <italic>kapha</italic> (bio-humor governing fluid regulation and structural cohesion of the body in normalcy) ailments from the body. The assessment of this complete process depends on physicians&#x2019; visual assessments of vomitus features and patient responses, introducing subjectivity and interobserver variability. Moreover, this method requires more than continuous monitoring; thus, it can sometimes lead to human error, resulting in missed expelled content or complications. We propose an artificial intelligence (AI) model to monitor TE to observe visual changes (ie, patient vomitus content and gestures) to provide better clinical outcomes. This approach is being explored for the first time in the traditional system of medicine.</p></sec><sec><title>Objective</title><p>This study aims to develop and validate an AI-assisted digital framework for the objective evaluation of TE via (1) automatic vomitus detection, (2) content classification, (3) number of bouts expelled, (4) facial expressions and individual gestures, (5) determination of detoxification type, and (6) provision of a postpurificatory dietary regimen after completion.</p></sec><sec sec-type="methods"><title>Methods</title><p>The study will be conducted in 3 phases. The first is the preparation of standard operating procedure for TE data collection. The second is data annotation of detected vomiting events. 
All analyses will be conducted using Python libraries, including <italic>scikit-learn</italic> (version 1.3.2; developed by the scikit-learn contributors, Python Software Foundation), <italic>TensorFlow</italic> (version 2.14.0; Google Brain Team, Google LLC), and tools supported under Google Summer of Code 2023 (Google LLC), along with SPSS Statistics (version 26.0; IBM Corp) for statistical analysis. In the third phase, model performance will be evaluated using standard machine learning metrics, and agreement with expert assessments will be measured using the Fleiss &#x03BA; statistic. This study is exploratory in nature. Thus, 50 volunteers will be targeted.</p></sec><sec sec-type="results"><title>Results</title><p>This is the first study of its kind, so to create the dataset, we prepared a standard operating procedure for TE event data collection. Data collection was completed in December 2025. Data annotation and preliminary model preparation are ongoing, with final testing and validation expected to be completed by December 2025. External testing in the health care setting is expected to be completed by February 2026.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>This work presents one of the first attempts to apply deep learning for objective analysis of the TE process in Ayurveda. By combining YOLOv9 for vomit detection and residual neural network for classification, the framework achieves promising accuracy in automated vomit identification. 
The results will demonstrate the potential of AI-assisted analysis in traditional medicine, although further clinical validation and expansion across multiple centers will be necessary before deployment in real-world settings.</p></sec><sec><title>Trial Registration</title><p>Clinical Trials Registry-India CTRI/2023/03/051087; https://ctri.nic.in/Clinicaltrials/pmaindet2.php?EncHid=ODI4NTA=&#x0026;Enc=&#x0026;userName=</p></sec><sec sec-type="registered-report"><title>International Registered Report Identifier (IRRID)</title><p>DERR1-10.2196/79875</p></sec></abstract><kwd-group><kwd>validation</kwd><kwd>therapeutic emesis</kwd><kwd>TE</kwd><kwd>vamana karma</kwd><kwd>traditional medicine</kwd><kwd>protocol</kwd><kwd>artificial intelligence</kwd><kwd>AI</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>The traditional Indian system of medicine promotes overall health and wellness through personalized therapies and a detoxifying process. Therapeutic emesis (TE), or <italic>vamana karma</italic>, is one of the bio-pentavalent purification procedures used to treat deranged <italic>kapha</italic> ailments that include metabolic, respiratory, and dermatological conditions such as psoriasis and eczema [<xref ref-type="bibr" rid="ref1">1</xref>]. TE is conducted in 3 phases (ie, preparatory phase, main phase, and postpurificatory phase). In the main phase, volunteers are subjected to induced vomiting through the provision of emetic herbal medicines. After initiation of the process, the individual expels the vitiated content, concurrently evident through physical changes (these include the onset of nausea, increased salivation, abdominal discomfort, sweating, piloerection, lacrimation, and the progressive expulsion of gastric contents, reflecting effective mobilization and elimination of morbid bio-humor) [<xref ref-type="bibr" rid="ref2">2</xref>]. 
This procedure is carried out early in the morning and takes approximately 1 hour, although it sometimes takes more than an hour to complete.</p><p>Previously, TE has been assessed subjectively by trained physicians through visual inspection&#x2013;based emesis indicators, such as patient gestures that include nausea, discomfort, and distress and vomitus characteristics such as color, consistency, volume, and odor. Nevertheless, dependence on human knowledge may introduce observer bias and interindividual variability, affecting reliability in clinical outputs [<xref ref-type="bibr" rid="ref3">3</xref>]. Furthermore, extended therapy is often time-consuming and poses challenges for physicians in observing and documenting continuously, potentially creating inconsistency and reducing standardization. Studies suggest that human error is one of the most significant contributors to decreased productivity and quality. Multifactorial interaction among stress, repeated tasks, fatigue, and workplace circumstances impairs cognitive performance, attention, and decision-making accuracy [<xref ref-type="bibr" rid="ref4">4</xref>]. Similarly, the variable rapid nature of emesis creates difficulties in manual tracking due to the rapid onset of the process, thus limiting real-time monitoring.</p><p>Research highlights that the integration of artificial intelligence (AI) techniques in image analysis for clinical decision support showcases their potential in diverse areas. Today&#x2019;s health care practices have changed significantly with the help of AI and machine learning (ML), which help with more accurate diagnoses, custom interventions, and informed decisions [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. Previous studies have substantiated that the use of AI-assisted imaging yields superior accuracy and specificity compared to traditional methods [<xref ref-type="bibr" rid="ref7">7</xref>]. 
Further cutting-edge developments using YOLOv9, TensorFlow, and residual neural network (ResNet) models have yielded newer insights in scope for such assessment methods.</p><p>There have been many studies conducted on <italic>vamana karma</italic> focusing on its efficacy in diseases and biophysical parameters [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref10">10</xref>]. Presently, no study has been proposed to digitally analyze the procedure. This study has been designed to integrate for the first time an AI component for TE analysis. The primary objective is to support physicians in accurate interpretations of purification type and guiding patients to an appropriate postpurificatory diet. The primary aim is to assess appropriate signs and symptoms of TE through content analysis of vomitus by developing and validating an AI model using digital image processing and an AI algorithm. Additional objectives focus on developing a dataset of TE for training and validating an AI model, training the TE model for real-time assessment, and validating the performance of the developed AI model by comparing its assessments with those of expert Ayurvedic physicians.</p></sec><sec id="s1-2"><title>Research Imperative</title><p>In the traditional system of medicine, TE is a key intervention for body detoxification, which requires documentation of all evident changes during the process. Recent studies have underscored the adoption of the YOLOv9 approach for detecting abnormal crowd behavior to promote crowd safety [<xref ref-type="bibr" rid="ref11">11</xref>]. Correspondingly, another study used the same methodology in traffic hazards for public safety using the GC-YOLOv9 algorithm [<xref ref-type="bibr" rid="ref12">12</xref>]. 
Additionally, there are studies in which image processing was used for microscopic sputum analysis and the velocity distance support algorithm was used for sputum monitoring to detect phlegm stagnation [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. Contemporarily, one study was conducted to automate emesis in <italic>Suncus murinus</italic> using a convolutional neural network and a self-attention mechanism [<xref ref-type="bibr" rid="ref15">15</xref>]. Such studies draw attention to developing a model that can automate emetic episodes in traditional sciences. Moreover, the convergence of such approaches remains unexplored. Leveraging existing technologies could enable real-time monitoring for automation, enhance reliability, and improve patient outcomes.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>This is a prospective, observational, single-center study aimed at developing and validating an AI-based framework for the objective assessment of <italic>vamana karma</italic> (TE), a classical purification therapy in Ayurveda. The study adheres to the Standards for Reporting Diagnostic Accuracy&#x2013;Artificial Intelligence reporting guidelines [<xref ref-type="bibr" rid="ref16">16</xref>]. It will ensure data handling with model development, validation, and performance. A checklist has been provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-2"><title>Study Setting</title><p>The study will be conducted at a tertiary Ayurveda research and teaching institute with an established <italic>panchakarma</italic> (bio-purification methods) center. A dedicated clinical setup will be designed to ensure uniform data collection across participants. 
The study workflow is depicted in <xref ref-type="fig" rid="figure1">Figure 1A</xref> and <xref ref-type="fig" rid="figure1">Figure 1B</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>(A) Schematic representation of the data collection setup during <italic>vamana karma</italic> showing patient positioning, camera placement, vomit collection, and green backdrop for optimal video capture. (B) Study workflow and timeline illustrating the sequential phases of the research process, including data collection, manual annotation, machine learning model development, system optimization using trained datasets, and user testing for performance validation.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e79875_fig01.png"/></fig><p>To achieve uniformity in data collection, we will adopt the following guidelines throughout the process:</p><list list-type="bullet"><list-item><p>A monochromatic green backdrop will be used to optimize background segmentation for video analytics.</p></list-item><list-item><p>Participants who undergo TE will be dressed in a green gown for the same purpose.</p></list-item><list-item><p>Standardized artificial lighting will ensure uniform illumination and minimize the effect of varying light conditions.</p></list-item><list-item><p>High-definition cameras (minimum 1080p resolution) will be positioned at forehead level facing the participant to record both the vomitus events and the patients&#x2019; facial expressions.</p></list-item><list-item><p>Vomitus will be collected in transparent jars placed beneath the <italic>vamana karma</italic> chair setup.</p></list-item></list></sec><sec id="s2-3"><title>Participants</title><p>After providing informed consent, adult participants aged between 18 and 65 years will be enrolled in the study. Inclusion and exclusion criteria will be strictly adhered to for participant safety and data reliability. 
Inclusion criteria are adults clinically recommended for TE as per classic Ayurvedic guidelines for <italic>kapha</italic>-dominant conditions such as asthma, skin diseases, and metabolic disorders. Participants must be willing to consent to video recording during the procedure. Exclusion criteria include individuals with contraindications for <italic>vamana karma</italic>, including pregnancy, lactation, cardiac conditions, anemia, or known hypersensitivity to emetic medicine. Individuals unable or unwilling to provide informed consent will also be excluded.</p></sec><sec id="s2-4"><title>Sample Size</title><p>A total of 40 high-resolution videos (approximately 70 minutes each) were used, yielding over 10,000 frames annotated for vomit detection. This dataset size was considered adequate for training a deep learning model given the high temporal density of labeled events and the pretrained backbone used (YOLOv9 and ResNet). A preliminary learning curve analysis indicated performance saturation beyond this dataset size. Thus, for this study, we targeted 50 participants.</p></sec><sec id="s2-5"><title>Clinical Phases of TE</title><p>The complete process will be conducted in 3 phases, as shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>. Phase 1, also known as the preparatory phase (<italic>purva karma</italic>), starts with the administration of <italic>deepana</italic> (digestive stimulants) and <italic>pachana</italic> (digestive agents), followed by <italic>snehapana</italic> (internal oleation) for 5 to 7 days accompanied by <italic>abhyanga</italic> (massage) and <italic>svedana</italic> (sudation) for 1 day. In this 1-day period, a <italic>kapha</italic>-aggravating diet will be administered. Thereafter, the second or main phase (<italic>pradhana karma</italic>) starts on the <italic>vamana karma</italic> day with <italic>abhyanga</italic> (massage) and <italic>svedana</italic> (sudation). 
Participants will be allowed to take the following emetic formulations early in the morning: (1) rice gruel (consistency, color, and texture), (2) milk (2 L), (3) 8 g of medicine (madanphala powder; <italic>Randia dumetorum</italic> Linn.)+5 g of madhuyashti powder (<italic>Glycyrrhiza glabra</italic> Linn.)+2 g of vacha powder (<italic>Acorus calamus</italic> Linn.)+5 g of salt+honey (q.s.), (4) 30 g of madhuyashti kwath (decoction of <italic>G glabra</italic> Linn.) powder dissolved in 1 L of hot water, and (5) salt water (20 g of salt dissolved in 1 L of water; 1 glass=380 mL). The number, content, and force of vomitus bouts will be recorded. Vomit will be collected in calibrated jars for visual analysis and recorded via video.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Procedural phases of <italic>vamana karma</italic>, including preparatory (<italic>deepana</italic>-<italic>pachana</italic>), internal oleation (<italic>snehapana</italic>), and rest period showing diet and therapeutic interventions.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e79875_fig02.png"/></fig><p>In the third or postprocedure phase (<italic>paschat karma</italic>), after completion of <italic>vamana karma</italic>, depending on the type of purification, the participants will be gradually shifted to <italic>samsarjana karma</italic> (postdetoxification dietary regimen) to normalize the weak digestive fire.</p><p>Video data will be captured throughout the procedure to record vomitus events, facial expressions, and physical gestures. The data will form the primary source for model training and validation.</p></sec><sec id="s2-6"><title>Data Annotation</title><p>Video data will be processed through frame extraction and manual annotation to create labeled datasets for ML. 
More than 11,000 frames will be annotated for vomitus event detection, whereas approximately 700 to 800 cropped images will be classified as per classic parameters. All these annotations will serve as the ground truth for supervised learning and will be validated by experienced Ayurveda physicians to ensure accuracy. Data labeling details are listed in <xref ref-type="table" rid="table1">Table 1</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Outcome parameters used for analysis and manual annotation. The outcome parameters constitute the core analytical content of the study and were derived from predefined clinical variables. All parameters were manually annotated and labeled by trained reviewers to ensure consistency and accuracy of the dataset used for analysis.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Outcome parameters</td><td align="left" valign="bottom">Labeled and description</td></tr></thead><tbody><tr><td align="left" valign="top">Milk</td><td align="left" valign="top">Liquid, thin consistency and white color</td></tr><tr><td align="left" valign="top">Yavagu (rice milk, jaggery, and ghee)</td><td align="left" valign="top">Thick consistency; rice particles of brown color mixed with milk</td></tr><tr><td align="left" valign="top">Phanta</td><td align="left" valign="top">Water flow, thin consistency, and light yellow-brown color</td></tr><tr><td align="left" valign="top">Medicine</td><td align="left" valign="top">Thick pastelike consistency, less viscous, dark brown color, and less quantity with decreased flow</td></tr><tr><td align="left" valign="top">Salt water</td><td align="left" valign="top">Normal flow, watery consistency, and transparent</td></tr><tr><td align="left" valign="top">Pitta</td><td align="left" valign="top">Yellow color, thick consistency, normal flow, and much lower quantity</td></tr><tr><td align="left" valign="top">Facial expression 
analysis</td><td align="left" valign="top">The patients' facial gestures will be analyzed by mapping various features of the face, such as the eyebrows, eyes, and mouth, to the emotions of anger, fear, surprise, sadness, and happiness</td></tr><tr><td align="left" valign="top" colspan="2">Variations in the labeling of parameters</td></tr><tr><td align="left" valign="top">Phanta+yavagu</td><td align="left" valign="top">Light yellowish-brown color and rice particles</td></tr><tr><td align="left" valign="top">Salt water+milk+medicine</td><td align="left" valign="top">Transparent; brown particles with a slight white color</td></tr><tr><td align="left" valign="top">Pitta+salt water</td><td align="left" valign="top">Water of a yellowish color</td></tr><tr><td align="left" valign="top">Pitta+medicine+rice</td><td align="left" valign="top">Yellow color, brown particle&#x2013;like consistency, and rice particles</td></tr><tr><td align="left" valign="top">Pitta</td><td align="left" valign="top">Light yellow color</td></tr><tr><td align="left" valign="top">Rice particles</td><td align="left" valign="top">Thick consistency&#x2014;particles the size of a small dot</td></tr><tr><td align="left" valign="top">Salt water+medicine</td><td align="left" valign="top">Water of a light brown color</td></tr><tr><td align="left" valign="top">Rice particles+medicine particles</td><td align="left" valign="top">Light brown color; medicine particles the size of minute dots</td></tr><tr><td align="left" valign="top">Milk mixed with medicine</td><td align="left" valign="top">Light brownish color</td></tr><tr><td align="left" valign="top">Milk+phanta</td><td align="left" valign="top">White color with light brown texture</td></tr><tr><td align="left" valign="top">Phanta+milk</td><td align="left" valign="top">Light yellowish-brown color; small milk particles</td></tr><tr><td align="left" valign="top">Phanta+pitta</td><td align="left" valign="top">Yellow color with a light 
yellowish-brown&#x2013;colored liquid</td></tr><tr><td align="left" valign="top">Phanta+medicine</td><td align="left" valign="top">Light yellowish-brown color</td></tr><tr><td align="left" valign="top">Medicine+milk</td><td align="left" valign="top">White brown color; liquid, slightly thick consistency; and small particles with a white structure</td></tr><tr><td align="left" valign="top">Milk mixed with medicine</td><td align="left" valign="top">White appearance with a brown color texture</td></tr><tr><td align="left" valign="top">Milk+salt water</td><td align="left" valign="top">Transparent with white appearance</td></tr><tr><td align="left" valign="top">Phanta+medicine</td><td align="left" valign="top">Light yellowish-brown color, dark color, and more particles also present</td></tr><tr><td align="left" valign="top">Salt water+rice particles</td><td align="left" valign="top">Transparent water; rice particles appear dotted</td></tr><tr><td align="left" valign="top">Blood+milk</td><td align="left" valign="top">Red color mixed with white particles</td></tr></tbody></table></table-wrap></sec><sec id="s2-7"><title>ML Pipeline</title><sec id="s2-7-1"><title>Overview</title><p>The AI framework will integrate several ML components, as shown in <xref ref-type="fig" rid="figure3">Figure 3</xref>.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Flow diagram of input-output mapping for vomitus classification based on image analysis and machine learning data training pipeline.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="resprot_v15i1e79875_fig03.png"/></fig></sec><sec id="s2-7-2"><title>Object Detection</title><p>A YOLOv9 deep learning model will be trained to detect and localize vomitus streams in real-time video frames. The model will be trained on the annotated images and validated using k-fold cross-validation. 
Key performance metrics will include detection accuracy and precision-recall scores.</p></sec><sec id="s2-7-3"><title>Content Classification</title><p>A convolutional neural network based on ResNet architecture will be developed for vomitus content classification. The model will categorize each vomitus image into one of the predefined classes. Data augmentation techniques will be applied to increase dataset variability and improve generalization.</p></sec><sec id="s2-7-4"><title>Facial Expression and Gesture Analysis</title><p>Facial expression analysis will be conducted using DeepFace (Meta Platforms), an open-source facial recognition and analysis framework. Expressions such as disgust, sadness, tiredness, and anger will be detected. Physical gestures, including hand and eye movements, will also be analyzed using pose estimation models.</p></sec><sec id="s2-7-5"><title>Temporal Analysis</title><p>Detected events, classified vomitus types, and facial expressions will be time-stamped and mapped across the procedure timeline. This will enable the correlation of emesis patterns and patient responses, facilitating a structured understanding of the <italic>vamana karma</italic> process.</p></sec></sec><sec id="s2-8"><title>Statistical Analysis</title><p>Descriptive statistics will summarize participant demographics and baseline characteristics. ML model performance will be reported using accuracy, precision, recall, <italic>F</italic><sub>1</sub>-score, and area under the receiver operating characteristic curve.</p><p>Model validation will use 5-fold cross-validation techniques to ensure robustness. 
Agreement between AI outputs and physician assessments will be evaluated using the Fleiss &#x03BA; with standard interpretation thresholds of &#x003C;0.40 for poor agreement, 0.40 to 0.75 for fair to good agreement, and &#x003E;0.75 for excellent agreement.</p><p>All analyses will be conducted using Python libraries (scikit-learn [scikit-learn contributors] and TensorFlow [Google Brain Team]) and the SPSS software (IBM Corp) for statistical analysis.</p><p>After training both the YOLOv9 and ResNet models, testing will be conducted in the following structured way. The dataset will be divided into 70% for training, 15% for validation, and 15% for testing. The test set will consist of unseen video samples containing vomiting and nonvomiting events to ensure unbiased evaluation. The YOLOv9 model will be tested on the test dataset to detect vomit regions frame by frame, output bounding boxes, and confidence scores. To test the complete pipeline, the full video will be passed through YOLOv9 followed by ResNet. The pipeline will be evaluated on frame-level accuracy (correct vomit identification across frames), event-level accuracy (correct identification of full vomiting events), and processing time per frame (frames per second) to measure real-time feasibility.</p><p>K-fold cross-validation (eg, <italic>K</italic>=5) will be performed to verify model robustness and reduce overfitting. Additionally, to prevent data leakage, participant-level splitting will be used during model evaluation. All frames from a given participant will be kept within a single fold to ensure that no individual&#x2019;s data appear in both the training and test sets. This ensures realistic generalization performance.</p><p>To enhance clinical interpretability, gradient-weighted class activation mapping visualization will be explored on the ResNet classifier outputs to highlight the image regions contributing to vomit type classification. 
This step supports explainability and builds clinician trust in the AI&#x2019;s decisions.</p></sec><sec id="s2-9"><title>Outcome Measures</title><p>The primary outcome is accuracy (or <italic>F</italic><sub>1</sub>-score) of vomit event detection and classification at the participant level. Secondary outcomes are inference time per frame and classification consistency across participants.</p></sec><sec id="s2-10"><title>Ethical Considerations</title><p>Ethics approval for the study was obtained from the Institutional Ethics Committee of All India Institute of Ayurveda, New Delhi, India before initiation (325/19.12.2022/PhD-04/2022 [reissued in 2023]). All participants will provide written informed consent before recording after being informed about the study objectives, the voluntary nature of participation, and their right to withdraw at any time without consequences. Videos will be anonymized by removing identifiable facial features and stored in encrypted format (Advanced Encryption Standard&#x2013;256) on a secure local server with access restricted to authorized researchers.</p></sec><sec id="s2-11"><title>Cultural Considerations</title><p>While the potential of AI integration in Ayurveda is significant, it also raises ethical and cultural challenges. AI-based evaluation of deeply traditional and personalized procedures may be viewed with skepticism by practitioners who value individualized clinical judgment. Hence, AI tools should be positioned as decision support systems rather than replacements for practitioner expertise. Transparent algorithms, patient consent, and data privacy must remain central to any implementation.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>The study was initiated in January 2024 and is currently ongoing. Participant enrollment and video data collection began in February 2024, with a target of enrolling 50 patients by March 2025. Procedures for annotating data and training ML models are being carried out. 
Data analysis is scheduled to commence in May 2025, and the final results are expected to be available by December 2025.</p></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Expected Findings</title><p>The study seeks to test a novel way to assess TE by using an AI model. The proposed framework will evaluate the outcomes more accurately and reduce the physician&#x2019;s workload by automating the detection of vomiting events. Once validated, this framework could serve as a model for the application of AI in other traditional medicine procedures, thereby contributing to the broader field of integrative and digital health. Previous studies in medical imaging and diagnostic AI have established the capacity of ML algorithms to identify subtle patterns in biomedical data&#x2014;such as radiographs, endoscopic images, and pathology slides&#x2014;that often escape human perception. In traditional systems of medicine, similar AI-based models have been used for pulse diagnosis, tongue image interpretation, and facial feature analysis, suggesting the feasibility of similar deployment [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref21">21</xref>]. However, no prior work has attempted this in the domain of vomitus analysis, leaving a critical research gap that needs to be addressed. The framework can be generalized to other <italic>panchakarma</italic> procedures or Ayurvedic practices involving visual inspection, provided that adequate domain-specific annotations are available. Its modular design allows for retraining with site-specific data for broader clinical adoption. The study establishes the foundation for image-based pattern recognition models that could one day assist practitioners in evaluating the <italic>shuddhi lakshana</italic> (signs of purification) objectively.</p></sec><sec id="s4-2"><title>Strengths of the Study</title><p>The study will help enhance diagnostic precision and documentation. 
Moreover, it enables longitudinal data collection that may reveal new correlations among patient profiles, therapy outcomes, and procedural parameters, which were otherwise difficult to quantify manually.</p></sec><sec id="s4-3"><title>Limitations</title><p>The study&#x2019;s shortcomings include a small sample size, possible observer bias in vomitus labeling, and limited generalizability due to single-center data. Data will be collected in a controlled clinical setting, limiting real-world diversity. TE is a seasonal therapy traditionally administered during specific periods of the year, which naturally limits the availability of eligible participants and, consequently, the sample size for ML model development and validation. Furthermore, the digital interpretability of Ayurvedic features such as <italic>pitta</italic> (bio humor responsible for metabolic activities) and <italic>aushadhi</italic> (medicine) remains challenging. Despite these limitations, the study is an important step toward developing a validated framework for combining AI with traditional assessments. More comprehensive multicentric research is necessary to validate the proposed concept across diverse populations and practice settings.</p></sec><sec id="s4-4"><title>Future Directions</title><p>Future work will focus on validating the framework across larger, more diverse patient populations and exploring its application in real-time clinical decision-making. Although this work focuses on model development and internal validation, future studies will involve clinical validation across multiple <italic>panchakarma</italic> centers to assess model robustness under real-world variations in lighting, camera positioning, and practitioner styles.</p></sec><sec id="s4-5"><title>Conclusions</title><p>This work presents one of the first attempts to apply deep learning for objective analysis of the <italic>vamanakarma</italic> process in Ayurveda. 
By combining YOLOv9 for vomit detection and ResNet for classification, the framework is expected to achieve promising accuracy in automated vomit identification. The results will demonstrate the potential of AI-assisted analysis in traditional medicine, although further clinical validation and expansion across multiple centers will be necessary before deployment in real-world settings.</p></sec></sec></body><back><ack><p>The authors would like to express their sincere gratitude to the All India Institute of Ayurveda for providing the infrastructure and funding support required to carry out this research. All authors declared that they had insufficient funding to support open access publication of this manuscript, including from affiliated organizations or institutions, funding agencies, or other organizations. JMIR Publications provided article processing fee support for the publication of this article. The authors acknowledge Dr Neha Chauhan for her involvement in the initial phase of the study. The authors declare that no generative artificial intelligence or artificial intelligence&#x2013;assisted technologies were used in the preparation of this manuscript.</p></ack><notes><sec><title>Funding</title><p>This research was supported by the All India Institute of Ayurveda, New Delhi, India.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: APV, PR</p><p>Formal analysis: ARG, SS, RD</p><p>Methodology: PR</p><p>Project administration: APV, SK</p><p>Resources: APV, PR, SK</p><p>Visualization: ARG, RD, SK, SS</p><p>Writing&#x2014;original draft: PR</p><p>Writing&#x2014;review and editing: APV, RD, SS</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb3">ResNet</term><def><p>residual neural 
network</p></def></def-item><def-item><term id="abb4">TE</term><def><p>therapeutic emesis</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Acharya</surname><given-names>VY</given-names> </name></person-group><source>Caraka Samhita (Cakapani Ayurveda Dipika)</source><year>2015</year><publisher-name>Chaukhambha Orientalia</publisher-name></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="web"><source>Charak Samhita New Edition</source><access-date>2026-01-31</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.carakasamhitaonline.com/index.php?title=Main_Page">https://www.carakasamhitaonline.com/index.php?title=Main_Page</ext-link></comment></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="book"><person-group person-group-type="editor"><name name-style="western"><surname>Acharya</surname><given-names>Vaidya Jadavaji Trikamji</given-names> </name></person-group><source>The Charakasamhita by Agnives'a</source><year>1941</year><access-date>2026-01-31</access-date><publisher-name>Nirnaya Sagar Press</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://archive.org/details/jTZt_charaka-samhita-sanskrit-of-agnivesha-with-ayurveda-dipika-commentary-ed.-by-vai">https://archive.org/details/jTZt_charaka-samhita-sanskrit-of-agnivesha-with-ayurveda-dipika-commentary-ed.-by-vai</ext-link></comment></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yeow</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Ng</surname><given-names>PK</given-names> </name><name name-style="western"><surname>Tan</surname><given-names>KS</given-names> </name><name 
name-style="western"><surname>Chin</surname><given-names>TS</given-names> </name><name name-style="western"><surname>Lim</surname><given-names>WY</given-names> </name></person-group><article-title>Effects of stress, repetition, fatigue and work environment on human error in manufacturing industries</article-title><source>J Appl Sci</source><year>2014</year><month>12</month><day>1</day><volume>14</volume><issue>24</issue><fpage>3464</fpage><lpage>3471</lpage><pub-id pub-id-type="doi">10.3923/jas.2014.3464.3471</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khalifa</surname><given-names>M</given-names> </name><name name-style="western"><surname>Albadawy</surname><given-names>M</given-names> </name></person-group><article-title>AI in diagnostic imaging: revolutionising accuracy and efficiency</article-title><source>Comput Methods Programs Biomed Update</source><year>2024</year><volume>5</volume><fpage>100146</fpage><pub-id pub-id-type="doi">10.1016/j.cmpbup.2024.100146</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Al-Obeidat</surname><given-names>F</given-names> </name><name name-style="western"><surname>Hafez</surname><given-names>W</given-names> </name><name name-style="western"><surname>Gador</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Diagnostic performance of AI-based models versus physicians among patients with hepatocellular carcinoma: a systematic review and meta-analysis</article-title><source>Front Artif Intell</source><year>2024</year><volume>7</volume><fpage>1398205</fpage><pub-id pub-id-type="doi">10.3389/frai.2024.1398205</pub-id><pub-id pub-id-type="medline">39224209</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nagareddy</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vadlamani</surname><given-names>R</given-names> </name><name name-style="western"><surname>Venkannagari</surname><given-names>NR</given-names> </name><name name-style="western"><surname>Jain</surname><given-names>S</given-names> </name><name name-style="western"><surname>Basheer</surname><given-names>SN</given-names> </name><name name-style="western"><surname>Murugesan</surname><given-names>S</given-names> </name></person-group><article-title>Comparison of the artificial intelligence versus traditional radiographic interpretation in detecting periapical periodontitis: a diagnostic accuracy study</article-title><source>J Pharm Bioallied Sci</source><year>2024</year><month>12</month><volume>16</volume><issue>Suppl 4</issue><fpage>S3676</fpage><lpage>S3678</lpage><pub-id pub-id-type="doi">10.4103/jpbs.jpbs_1096_24</pub-id><pub-id pub-id-type="medline">39926764</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>N</surname><given-names>S</given-names> </name><name name-style="western"><surname>D</surname><given-names>P</given-names> </name></person-group><article-title>A practical approach towards understanding vamana karma: a review on unique detoxification therapy</article-title><source>Int J Res Ayurveda Pharm</source><year>2024</year><volume>15</volume><issue>5</issue><fpage>91</fpage><lpage>96</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://ijrap.net/index.php/login/currentissue">https://ijrap.net/index.php/login/currentissue</ext-link></comment><pub-id pub-id-type="doi">10.7897/2277-4343.155162</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Kumari</surname><given-names>K</given-names> </name><name name-style="western"><surname>Bhatted</surname><given-names>SK</given-names> </name></person-group><article-title>Evaluation of safety and efficacy of vamana karma in healthy and diseased individuals: a systemic review</article-title><source>J Indian Syst Med</source><year>2024</year><volume>12</volume><issue>3</issue><fpage>129</fpage><lpage>134</lpage><pub-id pub-id-type="doi">10.4103/jism.jism_5_23</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Supriya</surname><given-names>C</given-names> </name><name name-style="western"><surname>Thakur</surname><given-names>N</given-names> </name><name name-style="western"><surname>Kaushik</surname><given-names>P</given-names> </name><etal/></person-group><article-title>A clinical study to assess the role of vasantika vamana on lipid profile and weight in healthy individuals</article-title><source>Int J Ayu Pharm Res</source><year>2023</year><fpage>104</fpage><lpage>112</lpage><pub-id pub-id-type="doi">10.47070/ijapr.v11i10.2939</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alsabei</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Alsubait</surname><given-names>TM</given-names> </name><name name-style="western"><surname>Alhakami</surname><given-names>HH</given-names> </name></person-group><article-title>Enhancing crowd safety at Hajj: real-time detection of abnormal behavior using YOLOv9</article-title><source>IEEE Access</source><year>2025</year><volume>13</volume><fpage>37748</fpage><lpage>37761</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2025.3545256</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>An</surname><given-names>R</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>G</given-names> </name></person-group><article-title>GC-YOLOv9: innovative smart city traffic monitoring solution</article-title><source>Alex Eng J</source><year>2024</year><month>11</month><volume>106</volume><fpage>277</fpage><lpage>287</lpage><pub-id pub-id-type="doi">10.1016/j.aej.2024.07.004</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Azman</surname><given-names>FI</given-names> </name><name name-style="western"><surname>Ghazali</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Mohamed</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Hamid</surname><given-names>R</given-names> </name></person-group><article-title>Detection of sputum smear cell based on image processing analysis</article-title><source>ARPN J Eng Appl Sci</source><year>2015</year><access-date>2025-05-25</access-date><volume>10</volume><fpage>9880</fpage><lpage>9884</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.arpnjournals.org/jeas/research_papers/rp_2015/jeas_1115_2999.pdf">https://www.arpnjournals.org/jeas/research_papers/rp_2015/jeas_1115_2999.pdf</ext-link></comment></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>X</given-names> </name></person-group><article-title>A 
phlegm stagnation monitoring based on VDS algorithm</article-title><source>J Healthc Eng</source><year>2020</year><volume>2020</volume><fpage>8714070</fpage><pub-id pub-id-type="doi">10.1155/2020/8714070</pub-id><pub-id pub-id-type="medline">32399167</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Qiao</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>X</given-names> </name><etal/></person-group><article-title>A deep learning-based system for automatic detection of emesis with high accuracy in suncus murinus</article-title><source>Commun Biol</source><year>2025</year><month>02</month><day>10</day><volume>8</volume><issue>1</issue><fpage>209</fpage><pub-id pub-id-type="doi">10.1038/s42003-025-07479-0</pub-id><pub-id pub-id-type="medline">39930110</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sounderajah</surname><given-names>V</given-names> </name><name name-style="western"><surname>Ashrafian</surname><given-names>H</given-names> </name><name name-style="western"><surname>Golub</surname><given-names>RM</given-names> </name><etal/></person-group><article-title>Developing a reporting guideline for artificial intelligence-centred diagnostic test accuracy studies: the STARD-AI protocol</article-title><source>BMJ Open</source><year>2021</year><month>06</month><day>28</day><volume>11</volume><issue>6</issue><fpage>e047709</fpage><pub-id pub-id-type="doi">10.1136/bmjopen-2020-047709</pub-id><pub-id pub-id-type="medline">34183345</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Kumar</surname><given-names>PV</given-names> </name><name name-style="western"><surname>Deshpande</surname><given-names>S</given-names> </name><name name-style="western"><surname>Nagendra</surname><given-names>HR</given-names> </name></person-group><article-title>Traditional practices and recent advances in nadi pariksha: a comprehensive review</article-title><source>J Ayurveda Integr Med</source><year>2019</year><volume>10</volume><issue>4</issue><fpage>308</fpage><lpage>315</lpage><pub-id pub-id-type="doi">10.1016/j.jaim.2017.10.007</pub-id><pub-id pub-id-type="medline">30100236</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kurande</surname><given-names>V</given-names> </name><name name-style="western"><surname>Waagepetersen</surname><given-names>R</given-names> </name><name name-style="western"><surname>Toft</surname><given-names>E</given-names> </name><name name-style="western"><surname>Prasad</surname><given-names>R</given-names> </name><name name-style="western"><surname>Raturi</surname><given-names>L</given-names> </name></person-group><article-title>Repeatability of pulse diagnosis and body constitution diagnosis in traditional Indian ayurveda medicine</article-title><source>Glob Adv Health Med</source><year>2012</year><month>11</month><volume>1</volume><issue>5</issue><fpage>36</fpage><lpage>42</lpage><pub-id pub-id-type="doi">10.7453/gahmj.2012.1.5.011</pub-id><pub-id pub-id-type="medline">27257530</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>X</given-names> 
</name><etal/></person-group><article-title>Deep learning multi-label tongue image analysis and its application in a population undergoing routine medical checkup</article-title><source>Evid Based Complement Alternat Med</source><year>2022</year><volume>2022</volume><fpage>3384209</fpage><pub-id pub-id-type="doi">10.1155/2022/3384209</pub-id><pub-id pub-id-type="medline">36212950</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>P</given-names> </name><etal/></person-group><article-title>A survey of artificial intelligence in tongue image for disease diagnosis and syndrome differentiation</article-title><source>Digit Health</source><year>2023</year><volume>9</volume><fpage>20552076231191044</fpage><pub-id pub-id-type="doi">10.1177/20552076231191044</pub-id><pub-id pub-id-type="medline">37559828</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tiwari</surname><given-names>P</given-names> </name><name name-style="western"><surname>Kutum</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sethi</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Recapitulation of ayurveda constitution types by machine learning of phenotypic traits</article-title><source>PLoS ONE</source><year>2017</year><volume>12</volume><issue>10</issue><fpage>e0185380</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0185380</pub-id><pub-id pub-id-type="medline">28981546</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 
1</label><p>Supplementary table S1.</p><media xlink:href="resprot_v15i1e79875_app1.docx" xlink:title="DOCX File, 20 KB"/></supplementary-material></app-group></back></article>