<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "https://jats.nlm.nih.gov/publishing/1.3/JATS-journalpublishing1-3.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="1.3" article-type="research-article" xml:lang="en"><front><journal-meta><journal-id journal-id-type="issn">2656-2804</journal-id><journal-title-group><journal-title>Indonesian Journal on Learning and Advanced Education (IJOLAE)</journal-title><abbrev-journal-title>ijolae</abbrev-journal-title></journal-title-group><issn pub-type="epub">2656-2804</issn><issn pub-type="ppub">2655-920X</issn><publisher><publisher-name>Universitas Muhammadiyah Surakarta</publisher-name></publisher></journal-meta><article-meta><article-id pub-id-type="doi">10.23917/ijolae.v6i3.23675</article-id><article-categories/><title-group><article-title>Transformative Practices: Integrating Automated Writing Evaluation in Higher Education Writing Classrooms - A Systematic Review</article-title></title-group><contrib-group><contrib contrib-type="author"><name><surname>Astutik</surname><given-names>Indri</given-names></name><address><country>Indonesia</country></address><xref ref-type="aff" rid="AFF-1"/></contrib><contrib contrib-type="author"><name><surname>Widiati</surname><given-names>Utami</given-names></name><address><country>Indonesia</country></address><xref ref-type="aff" rid="AFF-1"/></contrib><contrib contrib-type="author"><name><surname>Ratri</surname><given-names>Devinta Puspita</given-names></name><address><country>Indonesia</country></address><xref ref-type="aff" rid="AFF-1"/></contrib><contrib contrib-type="author"><name><surname>Jonathans</surname><given-names>Peggy Magdalena</given-names></name><address><country>Indonesia</country></address><xref ref-type="aff" rid="AFF-1"/></contrib><contrib contrib-type="author"><name><surname>Nurkamilah</surname><given-names>Nurkamilah</given-names></name><address><country>United States</country><email>nurkamilah@unmuhjember.ac.id</email></address><xref ref-type="aff" rid="AFF-2"/><xref ref-type="corresp" rid="cor-4"/></contrib><contrib contrib-type="author"><name><surname>Devanti</surname><given-names>Yeni Mardiyana</given-names></name><address><country>Indonesia</country></address><xref ref-type="aff" rid="AFF-3"/></contrib><contrib contrib-type="author"><name><surname>Harfal</surname><given-names>Zaldi</given-names></name><address><country>United States</country></address><xref ref-type="aff" rid="AFF-4"/></contrib></contrib-group><aff id="AFF-1"><institution content-type="dept">Faculty of Letters</institution><institution-wrap><institution>Universitas Negeri Malang</institution><institution-id institution-id-type="ror">https://ror.org/00ypgyy34</institution-id></institution-wrap><country country="ID">Indonesia</country></aff><aff id="AFF-2">College of Education, the Pennsylvania State University</aff><aff id="AFF-3"><institution content-type="dept">Faculty of Teacher Training and Education</institution><institution-wrap><institution>Universitas Muhammadiyah Jember</institution><institution-id institution-id-type="ror">https://ror.org/021p32893</institution-id></institution-wrap><country country="ID">Indonesia</country></aff><aff id="AFF-4"><institution content-type="dept">Warner School of Education</institution><institution-wrap><institution>University of Rochester</institution><institution-id institution-id-type="ror">https://ror.org/022kthw22</institution-id></institution-wrap><country country="US">United States</country></aff><author-notes><corresp id="cor-4"><bold>Corresponding author:  Nurkamilah Nurkamilah</bold>, College of Education, the Pennsylvania State University 
. Email: <email>nurkamilah@unmuhjember.ac.id</email></corresp></author-notes><pub-date date-type="pub" iso-8601-date="2024-9-20" publication-format="electronic"><day>20</day><month>9</month><year>2024</year></pub-date><pub-date date-type="collection" iso-8601-date="2024-11-22" publication-format="electronic"><day>22</day><month>11</month><year>2024</year></pub-date><fpage>423</fpage><lpage>441</lpage><history><date date-type="received" iso-8601-date="2024-5-15"><day>15</day><month>5</month><year>2024</year></date><date date-type="rev-recd" iso-8601-date="2024-8-16"><day>16</day><month>8</month><year>2024</year></date><date date-type="accepted" iso-8601-date="2024-8-27"><day>27</day><month>8</month><year>2024</year></date></history><permissions><copyright-statement>Copyright (c) 2024 Indri Astutik, Utami Widiati, Devinta Puspita Ratri, Peggy Magdalena Jonathans, Nurkamilah Nurkamilah, Yeni Mardiyana Devanti, Zaldi Harfal</copyright-statement><copyright-year>2024</copyright-year><copyright-holder>Indri Astutik, Utami Widiati, Devinta Puspita Ratri, Peggy Magdalena Jonathans, Nurkamilah Nurkamilah, Yeni Mardiyana Devanti, Zaldi Harfal</copyright-holder><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><ali:license_ref xmlns:ali="http://www.niso.org/schemas/ali/1.0/">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This work is licensed under a Creative Commons Attribution 4.0 International License.</license-p></license></permissions><self-uri xlink:href="https://journals2.ums.ac.id/ijolae/article/view/9104" xlink:title="Transformative Practices: Integrating Automated Writing Evaluation in Higher Education Writing Classrooms - A Systematic Review">Transformative Practices: Integrating Automated Writing Evaluation in Higher Education Writing Classrooms - A Systematic Review</self-uri><abstract><p>This systematic literature review explores the utilization of Automated Writing Evaluation (AWE) as a writing scoring tool over a five-year period from 2016 to 2020, focusing on its role in the transformation and integration of learning tools for pedagogical purposes. Transformation refers to the significant changes and advancements in teaching methods, particularly in adapting to new educational technologies and approaches, while integration involves the seamless incorporation of AWE systems into these evolving instructional practices to enhance the effectiveness of writing instruction. The study aims to analyze the various types of AWE employed in academic research, track trends in AWE technology strategies, and investigate students’ perceptions of AWE in both scoring and instructional contexts. Additionally, it aims to uncover the benefits and limitations associated with AWE implementation in writing instruction. Examining 19 journal articles, this review identifies fourteen types of AWE utilized by researchers and tracks advancements in machine learning within the field. The findings reveal positive student perceptions of AWE, citing its usefulness, efficiency, and linguistic accuracy in scoring and instruction. Benefits of AWE implementation include improved linguistic accuracy, enhanced writing performance, increased student engagement, and the provision of reliable and valid feedback. Moreover, AWE demonstrates effectiveness in scoring and feedback provision, with potential short- and long-term effects on student learning. 
However, limitations of AWE are also noted, including student distrust of feedback and a preference for human raters over AWE-generated scores. This review provides valuable insights into the multifaceted role of AWE in writing instruction, highlighting its potential benefits and areas for improvement.</p></abstract><kwd-group><kwd>automated writing evaluation</kwd><kwd>educational technology</kwd><kwd>employs holistic</kwd><kwd>integration of learning</kwd><kwd>student engagement</kwd><kwd>transformation learning</kwd><kwd>transformative practice</kwd></kwd-group><custom-meta-group><custom-meta><meta-name>File created by JATS Editor</meta-name><meta-value><ext-link ext-link-type="uri" xlink:href="https://jatseditor.com" xlink:title="JATS Editor">JATS Editor</ext-link></meta-value></custom-meta></custom-meta-group></article-meta></front><body><sec><title>1. Introduction</title><p>A good learning process can improve the quality of education <xref ref-type="bibr" rid="BIBR-1">(Abidin et al., 2024)</xref>. The effects of Information and Communication Technology on education cannot be overemphasized <xref ref-type="bibr" rid="BIBR-26">(Onojah et al., 2021)</xref>, and technology plays an important role in the world of education <xref ref-type="bibr" rid="BIBR-35">(Sulistyanto et al., 2022)</xref>. The integration of technology into language learning has significantly impacted classroom instruction and the assessment of learners' language proficiency. This trend has driven advancements in educational technology, leading software designers to develop and expand tools for assessing learners' receptive and productive skills. The inception of language assessment technology in the 1960s aimed to streamline the assessment process <xref ref-type="bibr" rid="BIBR-6">(Chapelle &amp; Voss, 2016)</xref>. This technology addresses several drawbacks of traditional paper-based testing, offering faster, more efficient, and cost-effective alternatives <xref ref-type="bibr" rid="BIBR-16">(Laborda, 2007)</xref>. Moreover, it improves the standardization of essay assessments and the provision of timely and valid feedback <xref ref-type="bibr" rid="BIBR-42">(Wang et al., 2020)</xref>.</p><p>Automated Writing Evaluation (AWE) is a popular manifestation of technology integration in writing assessment that uses computer systems to generate scores and feedback automatically <xref ref-type="bibr" rid="BIBR-39">(Stevenson &amp; Phakiti, 2019)</xref>. It is widely employed in educational settings and standardized tests. High-stakes tests such as the Test of English as a Foreign Language (TOEFL) and the Graduate Management Admission Test (GMAT) exemplify the utilization of AWE <xref ref-type="bibr" rid="BIBR-37">(Stevenson, 2016)</xref><xref ref-type="bibr" rid="BIBR-39">(Stevenson &amp; Phakiti, 2019)</xref>. These tests demonstrate that the technology provides an effective and efficient alternative to time-consuming and resource-intensive paper-based tests in educational settings.</p><p>However, controversies persist regarding the use of AWE, particularly in assessing productive skills like speaking and writing. Many researchers question the accuracy of scoring, the technology's feedback capabilities, and the implications of writing for a non-human audience <xref ref-type="bibr" rid="BIBR-37">(Stevenson, 2016)</xref>. 
Despite lingering doubts, AWE has found its way into writing classrooms to aid teachers and learners in evaluating writing competence and providing writing instruction. Numerous research studies and analyses have explored the impact of AWE on learners' writing proficiency <xref ref-type="bibr" rid="BIBR-20">(Liao, 2016)</xref><xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref><xref ref-type="bibr" rid="BIBR-21">(Lim &amp; Phua, 2019)</xref><xref ref-type="bibr" rid="BIBR-29">(Roscoe et al., 2017)</xref><xref ref-type="bibr" rid="BIBR-34">(Silva, 2017)</xref><xref ref-type="bibr" rid="BIBR-37">(Stevenson, 2016)</xref>. Stevenson, for example, emphasized that a key feature of AWE lies in its scoring engine, which utilizes techniques such as artificial intelligence, natural language processing, and semantic analysis to generate automated scores. Liao asserted that employing AWE to scaffold students' writing abilities led to a reduction in grammatical errors in L2 writing. Silva underscored AWE's pedagogical nature, noting its integration with the assessment development process and its role in scaffolding student learning. Liao further reported a significant improvement in learners' grammatical performance, indicating that AWE feedback prompted learners to interpret and internalize English grammatical rules through iterative revision processes. This integration of procedural skills ultimately facilitated learner automatization and long-term improvement. Additionally, Roscoe et al. found that learners perceived AWE as accurately scoring their writing and providing appropriate recommendations, thereby enhancing students' confidence in the scoring process.</p><p>Nevertheless, the consistency of Automated Writing Evaluation (AWE) in assessing learners' writing competence remains variable, even with teachers' intervention, particularly in feedback provision. This inconsistency stems from the design of AWE software, which often employs holistic scoring scales intended to provide scores reflecting overall text quality. Programs like My Access! and WriteToLearn utilize holistic scoring scales, aiming to offer comprehensive scores. Although these programs are equipped to provide analytical scores for specific aspects of text quality, such as language use, organization, and mechanics, they are not infallible. AWE scoring engines can be prompt-specific or generic, with prompt-specific engines limited to evaluating texts written in response to trained prompts, thereby contributing to scoring inconsistencies <xref ref-type="bibr" rid="BIBR-39">(Stevenson &amp; Phakiti, 2019)</xref>. Additionally, inconsistencies may arise from scoring errors, where raters fail to maintain consistent interpretation or apply scoring criteria uniformly <xref ref-type="bibr" rid="BIBR-12">(Godshalk et al., 1966)</xref><xref ref-type="bibr" rid="BIBR-46">(Wolfe, 2005)</xref>.</p><p>In one reviewed study, two types of feedback were employed: high-level (HL) feedback, addressing writing skills such as ideas and elaboration, organization, and style, as well as self-feedback directed at the author(s)' writing process or experience, and low-level (LL) feedback, addressing writing skills such as spelling, capitalization, punctuation, sentence structure, grammar, formatting, and word choice. The study revealed that the utilization of AWE alongside teacher feedback did not significantly affect the provision of HL feedback, whereas teacher-only feedback resulted in a greater quantity of LL feedback compared to AWE + teacher feedback. 
Additionally, learners tended to revise in response to LL feedback more in the teacher-only condition than in the AWE + teacher condition. Interestingly, learners taught using AWE + teacher feedback demonstrated long-term retention of their accuracy improvement, while those taught using teacher-only feedback showed short-term retention of accuracy improvement <xref ref-type="bibr" rid="BIBR-22">(Link et al., 2022)</xref>. However, <xref ref-type="bibr" rid="BIBR-45">(Wilson &amp; Czik, 2016)</xref> reported slightly different findings regarding HL feedback, indicating that students introduced to AWE received more HL feedback than LL feedback in the teacher-only-feedback condition. Hence, the efficacy of AWE intervention in writing assessment and classroom instruction remains open to question regarding its beneficial impact on learners' writing competence.</p><p>Given the varying findings across research studies and diverse applications in language classrooms, AWE is understandably a compelling and pertinent topic for research due to its technological innovations, efficiency, pedagogical benefits, ongoing research, and the controversies it faces. The current review, therefore, aims to clarify the controversies that exist in current studies by exploring five years of AWE practice, from 2016 through 2020, as a tool for scoring English language learners' writing and for writing classroom instruction. Considering previous researchers' findings, the present research questions were formulated as follows: “1) What types of AWE have researchers used from 2016 to 2020 as tools to score students' writing and provide writing classroom instruction? 2) What are students' perceptions of AWE used for scoring their writing and writing classroom instruction? 3) What are the benefits and limitations of using AWE for scoring students' writing and facilitating writing classroom instruction?”</p><p>The present review is expected to contribute valuable insights for further research on AWE’s role in technology-driven writing assessment and classroom instruction. Additionally, it may serve as a resource for advancing AWE devices to aid teachers in providing valuable feedback beyond the capabilities of technology alone, thus facilitating learners’ independent improvement in writing ability.</p></sec><sec><title>2. Method</title><p>A comprehensive and systematic literature search was conducted to identify relevant primary sources for this review. The focus was on published journal articles from 2016 to 2020, capturing recent advancements and trends in Automated Writing Evaluation (AWE) systems. The selection of articles was not limited to a specific regional context, as educational technology is in widespread use across countries worldwide. However, the search predominantly targeted studies within the higher education context. Both qualitative and quantitative research articles were included to provide a comprehensive understanding of the effectiveness, implementation, and perceptions of AWE in writing assessment and instruction. Opinion pieces, non-peer-reviewed articles, and studies outside the specified timeframe or educational context were excluded.</p><p>The search was conducted using the "ScienceDirect" database, which served as a systematic search engine across relevant journals from 2016 to 2020. 
The key terms employed in this study included "Automated Writing Evaluation and Students' Writing" OR "Automated Writing Evaluation and Writing Skills" OR "Automated Writing Evaluation and Higher Education Writing Skills." The search parameters were restricted to journal articles published between 2016 and 2020. Key journals in the fields of language learning technology, information writing, and education were selected for inclusion in the review.</p><p>The initial search conducted using “ScienceDirect” and the keywords “Automated Writing Evaluation and Students’ Writing” yielded a total of 10,223 articles. To narrow the scope and focus on more recent and relevant research, the search was restricted to articles published between 2016 and 2020, resulting in 3,948 articles. Given the considerable volume of findings and to home in further on specific aspects of writing skills, the search was refined using the keywords “Automated Writing Evaluation and English Writing Skill” within the same timeframe, which produced 1,001 journal articles. This refinement aimed to target studies that specifically addressed the evaluation of English writing skills, a critical area in higher education. Subsequently, the search terms were adjusted to “Automated Writing Evaluation and Higher Education Writing Skill,” resulting in 582 articles from various journals. This adjustment was made to ensure the focus remained on higher education contexts, the primary scope of this review. Finally, after thorough screening based on year of publication and relevance to the research objectives, 19 articles were found to meet the criteria for inclusion in the study. The overall procedure can be seen in <xref ref-type="fig" rid="figure-1">Figure 1</xref>.</p><fig id="figure-1"><label>Figure 1</label><caption><p>Criteria and Procedure of Searching Literature</p></caption><graphic xlink:href="https://journals2.ums.ac.id/ijolae/article/download/9104/4397/51436" mimetype="image" mime-subtype="png"><alt-text>Image</alt-text></graphic></fig><p>Key information was extracted from each of the studies to contextualize the data regarding Automated Writing Evaluation (AWE) usage as both an assessment tool for writing and classroom instruction. This extraction process involved identifying and presenting essential details such as the authors’ names, publication year, product utilized, primary data source, participants involved, and the affiliation(s) of the author(s). The results of this analysis were compiled and presented in tabular form for clarity and ease of reference. For a better view of the extracted key information, please see <xref ref-type="table" rid="table-1">Table 1</xref>.</p><p>After extraction, the journal articles underwent multiple readings and analyses to discern concepts, themes, and ideas pertaining to Automated Writing Evaluation (AWE) as both a scoring tool and a component integrated into writing classroom instruction. Through this careful reading, the key concepts, themes, ideas, and other pertinent information extracted from the reviewed articles were organized into an analysis table comprising categories such as title, author(s), journal publication, sampling, instrument/theory, findings, and notes. 
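To make this extraction scheme concrete, the following minimal sketch shows one way such a per-study record might be represented; it is purely illustrative, the field names are our own invention, and the example values are taken from Table 1. <preformat>
# Illustrative sketch only: a possible representation of the per-study
# extraction record described above. Field names are hypothetical;
# the example values are those reported in Table 1 (Liao, 2016a).
from dataclasses import dataclass

@dataclass
class StudyRecord:
    authors: str       # author name(s)
    year: str          # publication year (with a/b suffix where needed)
    product: str       # AWE software examined
    data_source: str   # primary data source
    respondents: str   # participants involved
    affiliation: str   # author affiliation(s)

liao_2016a = StudyRecord(
    authors="Hui-Chuan Liao",
    year="2016a",
    product="Criterion",
    data_source="Essay composition",
    respondents="66 Taiwanese university students",
    affiliation="National Kaohsiung University of Applied Sciences, Taiwan",
)
print(liao_2016a)
</preformat>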
This information guided the analysis, which in turn addressed the research questions and produced the findings of the literature review presented in the Result and Discussion section.</p><table-wrap id="table-1"><label>Table 1</label><caption><p>Key Information of the Sample Studies</p></caption><table frame="box" rules="all"><thead><tr><th colspan="1" rowspan="1" style="" align="left" valign="top">Author(s)</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Year</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Product</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Primary Data Sources</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Respondents</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Author Affiliation</th></tr></thead><tbody><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Hui-Chuan Liao</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2016a</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><p>Essay composition</p></td><td colspan="1" rowspan="1" style="" align="left" valign="top">66 Taiwanese university students</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><p>National Kaohsiung University of Applied Sciences in Taiwan</p></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Svetlana Koltovskaia</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2020</td><td colspan="1" rowspan="1" style="" align="left" valign="top">AWCF of Grammarly</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Pre &amp; Post writing test</td><td colspan="1" rowspan="1" style="" align="left" valign="top">2 ESL college students</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Oklahoma State University, United States</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Zhi Li, Hui-Hsien Feng and Aysel Saricaoglu</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2017</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Essay writing test and interview</td><td colspan="1" rowspan="1" style="" align="left" valign="top">63 intermediate-high-level participants &amp; 72 advanced-low-level participants of academic writing classes</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><p>Paragon Testing Enterprises, Inc.; Iowa State University; TED University, Turkey</p></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Hui-Chuan Liao</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2016b</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Essay writing and interview</td><td colspan="1" rowspan="1" style="" align="left" valign="top">63 participants</td><td colspan="1" rowspan="1" style="" align="left" valign="top">National Kaohsiung University of Applied Sciences, Taiwan</td></tr><tr><td colspan="1" rowspan="1" style="" align="left"
valign="top"><list list-type="order"><list-item><p>Stephanie Link, Mohaddeseh Mehrzad &amp; Mohammad Rahimi</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2020</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Pre, Post &amp; Delayed Post Essay writing test</td><td colspan="1" rowspan="1" style="" align="left" valign="top">32 participants of undergraduate English majors</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Oklahoma State University, USA &amp; Shiraz University, Iran</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Sha Liu &amp; Antony John Kunnan</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2016</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Four Human Raters &amp; WriteToLearn</td><td colspan="1" rowspan="1" style="" align="left" valign="top">326 students’ essays</td><td colspan="1" rowspan="1" style="" align="left" valign="top">163 participants of undergraduate EFL learners</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><p>China West Normal</p><p>University, China &amp; Nanyang Technological</p><p>University, Singapore</p></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Leyi Qian, Yali Zhao &amp; Yan Cheng</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2019</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Two expert raters &amp; iWrite</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Exposition, Argumentative &amp; narrative essays</td><td colspan="1" rowspan="1" style="" align="left" valign="top">332 participants of non-English-major undergraduate students</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><p>Hefei University of</p><p>Technology, China</p></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Rod D. Roscoe, Joshua Wilson, Adam C. Johnson, &amp; Christopher R. 
Mayra</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2017</td><td colspan="1" rowspan="1" style="" align="left" valign="top">W-Pal</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Essays &amp; Questionnaire</td><td colspan="1" rowspan="1" style="" align="left" valign="top">110 undergraduate students in a psychology course</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Arizona State University-Polytechnic, USA &amp; University of Delaware, Newark, USA</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Aysel Saricaoglu</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2018</td><td colspan="1" rowspan="1" style="" align="left" valign="top">ACDET</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Pre- and post-cause &amp; effect essay tests</td><td colspan="1" rowspan="1" style="" align="left" valign="top">31 students</td><td colspan="1" rowspan="1" style="" align="left" valign="top">TED University, Turkey</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Lili Tian &amp; Yu Zhou</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2020</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Pigai</td><td colspan="1" rowspan="1" style="" align="left" valign="top">90 essays</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Five sophomores in an online English writing course</td><td colspan="1" rowspan="1" style="" align="left" valign="top">University of Auckland, New Zealand</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Thomas Daniel Ullmann</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2019</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Machine Learning</td><td colspan="1" rowspan="1" style="" align="left" valign="top">76 essays</td><td colspan="1" rowspan="1" style="" align="left" valign="top">76 health, business, and engineering students</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Institute of Educational Technology, UK</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Zhijie Wang</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2020</td><td colspan="1" rowspan="1" style="" align="left" valign="top">automated essay evaluation (AEE: iWrite, Awrite, and Pigai)</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Observation, semi-structured interview, and questionnaire</td><td colspan="1" rowspan="1" style="" align="left" valign="top">188 students from China Agricultural University</td><td colspan="1" rowspan="1" style="" align="left" valign="top">China Agricultural University, Beijing, China</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Zhe (Victor) Zhang &amp; Ken Hyland</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2018</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Pigai</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Student texts, teacher feedback, AWE feedback, and student interviews</td><td colspan="1" rowspan="1" style="" align="left"
valign="top">Two Chinese students of English</td><td colspan="1" rowspan="1" style="" align="left" valign="top">The University of Hong Kong &amp; University of East Anglia, Norwich, UK</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Zhe Victor Zhang</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2020</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Pigai</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Student written texts, AWE feedback, &amp; student interviews</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Three Chinese students of English major</td><td colspan="1" rowspan="1" style="" align="left" valign="top">The Chinese University of Hong Kong, Hong Kong</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Brent Bridgeman &amp; Chaitanya Ramineni</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2017</td><td colspan="1" rowspan="1" style="" align="left" valign="top">e-rater</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Students’ writings, students’ questionnaire, &amp; a faculty member questionnaire</td><td colspan="1" rowspan="1" style="" align="left" valign="top">194 graduate students</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Educational Testing Service, United States</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Jim Ranalli, Stephanie Link &amp; Evgeny Chukharev-Hudilainen</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2016</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Argumentative writing tasks</td><td colspan="1" rowspan="1" style="" align="left" valign="top">82 students (36 lower-level students and 46 higher level students) of Iowa State University</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Iowa State University, United States</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Andreas Lachner, Christian Burkhart &amp; Matthias Nückles</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2017</td><td colspan="1" rowspan="1" style="" align="left" valign="top">CohViz</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Students’ essays</td><td colspan="1" rowspan="1" style="" align="left" valign="top">251 students</td><td colspan="1" rowspan="1" style="" align="left" valign="top">University of Freiburg, Germany &amp; University of Tübingen, Germany</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list list-type="order"><list-item><p>Mohammed Ali Mohsen &amp; Abdulaziz Alshahrani</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2019</td><td colspan="1" rowspan="1" style="" align="left" valign="top">MY Access!</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Students’ essays</td><td colspan="1" rowspan="1" style="" align="left" valign="top">6 Arab students of EFL</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Najran University, Saudi Arabia</td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top"><list 
list-type="order"><list-item><p>Gary Cheng</p></list-item></list></td><td colspan="1" rowspan="1" style="" align="left" valign="top">2017</td><td colspan="1" rowspan="1" style="" align="left" valign="top">online automated feedback (OAF)</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Students’ reﬂective journals, online questionnaire &amp; focus group interview</td><td colspan="1" rowspan="1" style="" align="left" valign="top">138 undergraduate students</td><td colspan="1" rowspan="1" style="" align="left" valign="top">The Education University of Hong Kong</td></tr></tbody></table></table-wrap></sec><sec><title>3. Result and Discussion</title><p>The analysis revealed the utilization of various Automated Writing Evaluation (AWE) software by researchers over the last five years (2016-2020) (<xref ref-type="fig" rid="figure-2">Figure 2</xref>). Notably, Criterion was employed by multiple researchers during this period (<xref ref-type="bibr" rid="BIBR-18">(Li et al., 2017)</xref>; <xref ref-type="bibr" rid="BIBR-20">(Liao, 2016)</xref>, <xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref>; <xref ref-type="bibr" rid="BIBR-22">(Link et al., 2022)</xref>; <xref ref-type="bibr" rid="BIBR-28">(Ranalli et al., 2017)</xref>), while Pigai was utilized by four researchers (<xref ref-type="bibr" rid="BIBR-40">(Tian &amp; Zhou, 2020)</xref>; <xref ref-type="bibr" rid="BIBR-42">(Wang et al., 2020)</xref>; <xref ref-type="bibr" rid="BIBR-47">(Zhang, 2020)</xref>; <xref ref-type="bibr" rid="BIBR-48">(Zhang &amp; Hyland, 2018)</xref>). Additionally, various other software programs were employed by different researchers, including ACDET <xref ref-type="bibr" rid="BIBR-31">(Saricaoglu, 2019)</xref>, WriteToLearn <xref ref-type="bibr" rid="BIBR-24">(Liu &amp; Kunnan, 2016)</xref>, iWrite (<xref ref-type="bibr" rid="BIBR-27">(Qian et al., 2020)</xref>; <xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref>), Awrite <xref ref-type="bibr" rid="BIBR-42">(Wang et al., 2020)</xref>, W-Pal <xref ref-type="bibr" rid="BIBR-29">(Roscoe et al., 2017)</xref>, AWCF of Grammarly <xref ref-type="bibr" rid="BIBR-15">(Koltovskaia, 2020)</xref>, Machine Learning <xref ref-type="bibr" rid="BIBR-41">(Ullmann, 2019)</xref>, e-rater <xref ref-type="bibr" rid="BIBR-3">(Bridgeman &amp; Ramineni, 2017)</xref>, CyWrite <xref ref-type="bibr" rid="BIBR-28">(Ranalli et al., 2017)</xref>, CohViz <xref ref-type="bibr" rid="BIBR-17">(Lachner et al., 2017)</xref>, My Access! <xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref>, and Online Automated Feedback (OAF) <xref ref-type="bibr" rid="BIBR-8">(Cheng, 2017)</xref>. In summary, researchers employed a total of 14 different AWE software programs over the five-year period from 2016 to 2020, highlighting the diverse range of options available for providing corrective feedback to students’ writing. These findings corroborate previous research indicating the varied nature of AWE systems <xref ref-type="bibr" rid="BIBR-39">(Stevenson &amp; Phakiti, 2019)</xref>.</p><fig id="figure-2" ignoredToc=""><label>Figure 2</label><caption><p>Trend of AWE Software Used by Researchers in 2016 – 2020</p></caption><graphic xlink:href="https://journals2.ums.ac.id/ijolae/article/download/9104/4397/51437" mimetype="image" mime-subtype="png"><alt-text>Image</alt-text></graphic></fig><p>Moreover, the findings suggest that Automated Writing Evaluation (AWE) software not only provided corrective feedback on students’ essays but also on their reflective journal writing. 
This observation is supported by research conducted by <xref ref-type="bibr" rid="BIBR-8">(Cheng, 2017)</xref> and <xref ref-type="bibr" rid="BIBR-28">(Ranalli et al., 2017)</xref>, who investigated the impact of Online Automated Feedback (OAF) and Criterion on students’ reflective journal writing. Their studies revealed that OAF and Criterion significantly improved students’ scores in writing reflective journals, indicating the effectiveness of these tools in providing feedback on such writing tasks. Consequently, students’ understanding and willingness to revise their reflective journal writing were positively influenced by the feedback from the software, resulting in improved scores. The studies by <xref ref-type="bibr" rid="BIBR-8">(Cheng, 2017)</xref> and <xref ref-type="bibr" rid="BIBR-28">(Ranalli et al., 2017)</xref> were the only ones to utilize students’ reflective journal writing in the higher education context from 2016 to 2020. In contrast, the remaining seventeen studies focused on students’ essays, which included argumentative, exposition, and narrative essays. In summary, the writing instruments used in the research studies fell primarily into two forms: students’ essays and students’ reflective journals.</p><sec><title>a. Students’ Perceptions of the Integration of AWE</title><p>The analysis revealed that eight research studies (<xref ref-type="bibr" rid="BIBR-8">(Cheng, 2017)</xref>; <xref ref-type="bibr" rid="BIBR-15">(Koltovskaia, 2020)</xref>; <xref ref-type="bibr" rid="BIBR-18">(Li et al., 2017)</xref>; <xref ref-type="bibr" rid="BIBR-20">(Liao, 2016)</xref>; <xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref>; <xref ref-type="bibr" rid="BIBR-29">(Roscoe et al., 2017)</xref>; <xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref>; <xref ref-type="bibr" rid="BIBR-47">(Zhang, 2020)</xref>) focused on examining students’ perceptions of Automated Writing Evaluation (AWE) as both a scoring tool and a component of classroom instruction. The results of these studies indicated varied perceptions among students regarding AWE’s utility. Students’ perceptions were categorized into two main groups in this review: positive and negative perceptions. 
A summary is presented in <xref ref-type="table" rid="table-2">Table 2</xref>.</p><table-wrap id="table-2"><label>Table 2</label><caption><p>Summary of Positive Perceptions of the Use of AWE</p></caption><table frame="box" rules="all"><thead><tr><th colspan="1" rowspan="1" style="" align="left" valign="top">Aspect of Positive Perceptions</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Percentage</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Kind of AWE</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Research Method</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Participants (n)</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Researcher(s)</th></tr></thead><tbody><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Effectiveness in identifying strengths and weaknesses of reflective journal writing</td><td colspan="1" rowspan="1" style="" align="left" valign="top">70</td><td colspan="1" rowspan="1" style="" align="left" valign="top">OAF</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Mixed-method design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">138</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-8">(Cheng, 2017)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Satisfaction with the feedback provided</td><td colspan="1" rowspan="1" style="" align="left" valign="top">80</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Mixed-method design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">135</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-18">(Li et al., 2017)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Reduction of grammatical errors</td><td colspan="1" rowspan="1" style="" align="left" valign="top">-</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Experimental design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">63</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-20">(Liao, 2016)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Accuracy, relevance, and usefulness in providing scoring and feedback</td><td colspan="1" rowspan="1" style="" align="left" valign="top">68.2</td><td colspan="1" rowspan="1" style="" align="left" valign="top">W-Pal</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Mixed-method design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">110</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-29">(Roscoe et al., 2017)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Satisfaction with grammar, usage, mechanics, and syntactic complexity feedback</td><td colspan="1" rowspan="1" style="" align="left" valign="top">-</td><td colspan="1" rowspan="1" style="" align="left" valign="top">iWrite</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Experimental design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">188</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-43">(Wang,
2022)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Satisfaction in identifying collocation errors</td><td colspan="1" rowspan="1" style="" align="left" valign="top">-</td><td colspan="1" rowspan="1" style="" align="left" valign="top">AEE: iWrite, Awrite, and Pigai</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Mixed-method design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">3</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-47">(Zhang, 2020)</xref></td></tr></tbody></table></table-wrap><p>The findings indicate that a significant majority (70%) of the 138 students strongly agreed that OAF effectively identified strengths and weaknesses in their reflective journal writing. Moreover, more than half (55%) expressed their willingness to use OAF for providing feedback on their reflective writing, while only a small fraction (3%) showed reluctance towards future usage. The most favored feature of Automated Writing Evaluation (AWE) among students was the online automatic classification system, with 60% of respondents citing its usefulness. This system received praise for its ability to identify areas for improvement (40%), provide helpful suggestions and examples (23%), offer a user-friendly interface (23%), and provide immediate feedback (13%). Overall, students appreciated the system's content, convenience, and speed. Additionally, a subgroup of students (12) believed that OAF could enhance their understanding of basic aspects of L2 writing. They highlighted two distinct advantages of OAF over teachers: its quick analysis and accessibility, as well as its capability for archiving and restoration. Despite some shortcomings, such as an occasional inability to detect certain errors, students remained proactive in seeking solutions to their L2 writing challenges <xref ref-type="bibr" rid="BIBR-11">(Dikli &amp; Bleyle, 2014)</xref><xref ref-type="bibr" rid="BIBR-13">(Graham et al., 2015)</xref>.</p><p>Further insights into positive perceptions were provided by <xref ref-type="bibr" rid="BIBR-18">(Li et al., 2017)</xref>, who surveyed 135 students. The results showed that a small minority (3%) of the 31 students interviewed expressed high satisfaction with Criterion feedback, while the majority (77%) reported satisfaction, and 10% remained neutral. Among these students, 71% expressed satisfaction with Criterion’s grammar feedback, while 10% desired more detailed feedback. Grammar feedback was deemed the most helpful type, with 77% of students endorsing it. However, certain categories within grammar, such as run-on sentences (39%), possessives (19%), and prepositions (13%), posed challenges. Corrections were perceived as either easy (45%), difficult (10%), or variable depending on Criterion’s clarity (32%). Additionally, 29% of students claimed to have addressed all feedback, while over half (58%) admitted to ignoring some feedback. The majority (71%) of students reported a positive perception of AWE, citing its effectiveness in error identification (see <xref ref-type="fig" rid="figure-2">Figure 2</xref>). Criterion was particularly instrumental in identifying errors related to articles (58%), wrong verb forms (19%), run-on sentences (19%), subject-verb agreement (13%), fragments (13%), wrong word forms (6%), pronouns (6%), possessives (3%), and faulty comparisons (3%). 
These findings corroborate previous research highlighting the core components of AWE, such as its scoring engine, which utilizes techniques like artificial intelligence and natural language processing <xref ref-type="bibr" rid="BIBR-37">(Stevenson, 2016)</xref>. Moreover, the use of AWE to scaffold students’ writing ability has been shown to reduce grammatical errors in L2 writing <xref ref-type="bibr" rid="BIBR-20">(Liao, 2016)</xref>, indicating that exposure to AWE feedback enhances learners’ understanding and application of English grammatical rules over time <xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref>.</p><p>Additional positive perceptions were documented by <xref ref-type="bibr" rid="BIBR-29">(Roscoe et al., 2017)</xref>, in whose study students regarded Automated Writing Evaluation (AWE) as accurate, relevant, and useful in providing scoring and feedback. They expressed satisfaction with the quality of feedback provided by W-Pal, with 68.2% of 110 students expressing a willingness to use it again in the future. Similarly, <xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref> reported that students exhibited a positive attitude towards Automated Essay Evaluation (AEE) systems, including iWrite, Awrite, and Pigai, rating them as greatly or moderately helpful. Most respondents appreciated the features of these systems, particularly regarding grammar, usage, mechanics, and syntactic complexity. While they were satisfied with the content analysis provided by AEE systems, they desired more feedback on discourse elements. The findings of <xref ref-type="bibr" rid="BIBR-47">(Zhang, 2020)</xref> also highlighted students' positive perceptions of AWE feedback, noting its helpfulness in L2 writing, particularly in identifying collocation errors rarely addressed by teachers. The feedback was perceived as immediate and accurate, aiding students in revising their work and fostering an understanding of the importance of revision in the writing process. Moreover, it encouraged students to adopt the practice of multiple drafting when completing writing assignments outside of writing subjects. 
These results underscore the effectiveness of various AWE software types in enhancing the accuracy of scoring students' writing <xref ref-type="bibr" rid="BIBR-4">(Bridgeman et al., 2012)</xref><xref ref-type="bibr" rid="BIBR-32">(Shermis, 2014)</xref>.</p><table-wrap id="table-3"><label>Table 3</label><caption><p>Summary of Negative Perceptions of the Use of AWE</p></caption><table frame="box" rules="all"><thead><tr><th colspan="1" rowspan="1" style="" align="left" valign="top">Aspect of Dissatisfaction</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Percentage</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Kind of AWE</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Research Method</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Participants (n)</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Researcher(s)</th></tr></thead><tbody><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Feedback perceived as less authoritative and possibly inaccurate</td><td colspan="1" rowspan="1" style="" align="left" valign="top">33</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Grammarly</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Case study</td><td colspan="1" rowspan="1" style="" align="left" valign="top">2</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-15">(Koltovskaia, 2020)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">A frustrating tool for a reluctant student to read and comprehend</td><td colspan="1" rowspan="1" style="" align="left" valign="top">-</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Experimental design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">66</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Quality of feedback, the system’s comprehension of human language, scoring methods, and the lack of explanatory reasons</td><td colspan="1" rowspan="1" style="" align="left" valign="top">37.68</td><td colspan="1" rowspan="1" style="" align="left" valign="top">OAF</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Mixed-method design</td><td colspan="1" rowspan="1" style="" align="left" valign="top">138</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-8">(Cheng, 2017)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Difficulty comprehending AWE feedback on content and organization</td><td colspan="1" rowspan="1" style="" align="left" valign="top">-</td><td colspan="1" rowspan="1" style="" align="left" valign="top">My Access</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Case study</td><td colspan="1" rowspan="1" style="" align="left" valign="top">6</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref></td></tr></tbody></table></table-wrap><p>To provide a balanced view, alongside the benefits of Automated Writing Evaluation (AWE), students’ perceptions also reveal significant concerns. 
<xref ref-type="table" rid="table-3">Table 3</xref> outlines their dissatisfaction, including feedback quality, system comprehension of human language, scoring methods, and lack of explanatory reasons. Conversely, negative perceptions towards Automated Writing Evaluation (AWE) were highlighted by Koltovskaia’s <xref ref-type="bibr" rid="BIBR-15">(Koltovskaia, 2020)</xref> findings, which revealed that one out of two students believed AWE’s feedback to be less authoritative than that of teachers and possibly inaccurate. This skepticism arises from the belief that automated AWE systems lack the depth of understanding and contextual awareness that human teachers offer. Students might think that AWE tools are unable to appreciate the finer details of their writing or the specific context in which it was created, which can lead to doubts about the accuracy and reliability of the feedback. Furthermore, because AWE systems rely on algorithms and fixed criteria, there is concern that they may misinterpret intricate aspects of writing or not address the unique needs of individual students as effectively as feedback from a teacher. Consequently, AWE feedback was described as the most frustrating tool to read and comprehend by a reluctant student <xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref>. The issues highlighted include the quality of feedback, the system’s comprehension of human language, scoring methods, and the lack of explanatory reasons. Among the surveyed students (52), dissatisfaction stemmed from various aspects, including the quality of feedback (35%), the system’s comprehension of human language (27%), scoring methods (19%), and lack of explanatory reasons (19%) <xref ref-type="bibr" rid="BIBR-8">(Cheng, 2017)</xref>. While students expressed a keen interest in using AWE, particularly My Access, they felt the program, particularly its word bank functionality, did not adequately benefit them. They encountered difficulties in comprehending AWE feedback, particularly regarding content and organization, perceiving it as overly general and not tailored to their needs. In contrast, teacher feedback, especially regarding content and organization, was deemed clearer and more diagnostic than AWE feedback <xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref>. These findings echo previous research indicating AWE’s limitations in providing feedback that fully meets students’ needs <xref ref-type="bibr" rid="BIBR-38">(Stevenson &amp; Phakiti, 2014)</xref>.</p></sec><sec><title>b. Benefits of AWE</title><p>The final objective of this study is to delineate the advantages and limitations of Automated Writing Evaluation (AWE) as both a scoring tool for students’ writing and a facilitator of writing instruction in the classroom. Numerous benefits have been identified, including enhanced linguistic accuracy, improved student performance, increased reliability and validity, effective scoring and feedback mechanisms, and both short-term and long-term impacts on student learning. 
These advantages have been validated by various studies, underscoring the promising potential of AWE for practical implementation in classroom settings (<xref ref-type="table" rid="table-4">Table 4</xref>).</p><table-wrap id="table-4"><label>Table 4</label><caption><p>Benefits of Automated Writing Evaluation (AWE)</p></caption><table frame="box" rules="all"><thead><tr><th colspan="1" rowspan="1" style="" align="left" valign="top">Aspect of Benefit</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Kind of AWE</th><th colspan="1" rowspan="1" style="" align="left" valign="top">Researcher(s)</th></tr></thead><tbody><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Offering Linguistic Accuracy</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion; Grammarly; WriteToLearn</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-20">(Liao, 2016)</xref>; <xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref>; <xref ref-type="bibr" rid="BIBR-28">(Ranalli et al., 2017)</xref>; <xref ref-type="bibr" rid="BIBR-15">(Koltovskaia, 2020)</xref>; <xref ref-type="bibr" rid="BIBR-24">(Liu &amp; Kunnan, 2016)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Improving Students’ Performance</td><td colspan="1" rowspan="1" style="" align="left" valign="top">W-Pal; Criterion; AEE: iWrite, Awrite, and Pigai; CohViz; My Access</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-29">(Roscoe et al., 2017)</xref>; <xref ref-type="bibr" rid="BIBR-22">(Link et al., 2022)</xref>; <xref ref-type="bibr" rid="BIBR-47">(Zhang, 2020)</xref>; <xref ref-type="bibr" rid="BIBR-17">(Lachner et al., 2017)</xref>; <xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Providing Reliability and Validity</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Machine Learning; AEE: iWrite, Awrite, and Pigai; My Access</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-41">(Ullmann, 2019)</xref>; <xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref>; <xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Offering Effectiveness in Scoring and Giving Writing Feedback</td><td colspan="1" rowspan="1" style="" align="left" valign="top">AEE: iWrite, Awrite, and Pigai; Pigai</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref>; <xref ref-type="bibr" rid="BIBR-48">(Zhang &amp; Hyland, 2018)</xref></td></tr><tr><td colspan="1" rowspan="1" style="" align="left" valign="top">Yielding Short-Term and Long-Term Effects on Students’ Learning</td><td colspan="1" rowspan="1" style="" align="left" valign="top">Criterion</td><td colspan="1" rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-22">(Link et al., 2022)</xref>; <xref ref-type="bibr" rid="BIBR-18">(Li et al., 2017)</xref></td></tr></tbody></table></table-wrap><list list-type="order"><list-item><p>Offering Linguistic Accuracy</p></list-item></list><p>The results indicated that Automated Writing Evaluation (AWE) had an early effect on reducing the number of fragments and subject-verb disagreements in new texts, while the reduction of run-on sentences and ill-formed 
verbs became noticeable towards the end of the study phase. Despite variations among categories in both revisions and new texts, a consistent trend of linguistic growth facilitated by AWE was observed, leading to improved linguistic accuracy <xref ref-type="bibr" rid="BIBR-20">(Liao, 2016)</xref>. AWE also demonstrated effectiveness in enhancing students' linguistic accuracy at a moderate level (57%) by addressing errors highlighted and suggested by the system, such as word form, articles, punctuation, spelling, prepositions, and spacing. Errors were visually highlighted in red, while suggestions were presented in green by the automated written corrective feedback (AWCF) system <xref ref-type="bibr" rid="BIBR-15">(Koltovskaia, 2020)</xref>. Moreover, AWE notably improved students' grammatical accuracy in both original and revised essays of the final task, underscoring its precision in identifying grammatical errors <xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref>. WriteToLearn, a form of AWE, demonstrated greater consistency in rating papers than human raters and was more accurate in identifying errors related to capitalization, spelling, punctuation, and connecting words, achieving precision rates of 100% for connecting-word errors and 92.3% for capitalization errors, with a recall of 83.5%. However, precision rates varied between 70% and 79% for errors related to subject-verb agreement, comma splices, and singular-plural nouns <xref ref-type="bibr" rid="BIBR-24">(Liu &amp; Kunnan, 2016)</xref>. Ranalli et al. <xref ref-type="bibr" rid="BIBR-28">(Ranalli et al., 2017)</xref> also reported an accuracy rate of 70% in linguistic feature identification using Criterion. This is achievable because AWE systems employ algorithms designed to identify and rectify grammatical errors, spelling mistakes, and contextual issues, offering uniform feedback and personalized suggestions through machine learning and natural language processing methods. These findings corroborate previous studies by Bridgeman et al. <xref ref-type="bibr" rid="BIBR-4">(Bridgeman et al., 2012)</xref> and Shermis <xref ref-type="bibr" rid="BIBR-32">(Shermis, 2014)</xref>, which emphasized the high accuracy of many AWE software types in scoring students' writing <xref ref-type="bibr" rid="BIBR-5">(Chapelle, 1999)</xref>.</p>
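<p>To make the detection figures reported above concrete, the following sketch is a hypothetical worked example (the counts are invented, not data from the reviewed studies) of how precision and recall are conventionally computed when an AWE system’s error flags are compared against human annotations.</p><code language="python">
# Hypothetical worked example: precision and recall of an AWE system's
# error detection, scored against human annotations. The counts are
# invented for illustration and are not from the reviewed studies.

def precision_recall(true_positives, false_positives, false_negatives):
    """Precision: share of flagged errors that are real errors.
    Recall: share of real errors that the system flagged."""
    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)
    return precision, recall

# Suppose the system flags 26 capitalization errors, 24 of which human
# raters confirm, and the raters find 2 further errors it missed.
p, r = precision_recall(true_positives=24, false_positives=2, false_negatives=2)
print(f"precision = {p:.1%}, recall = {r:.1%}")
</code>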
<xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref> suggested that AWE systems were valuable for evaluating students' writing and facilitating improvement. AWE improves students' writing ability by promptly identifying errors, offering instructional feedback, encouraging revisions, and tracking individual progress, all of which contribute to enhanced writing practices. While technology can serve as an assistant to instructors in second language learning, it cannot fully replace the role of instructors <xref ref-type="bibr" rid="BIBR-30">(Salaberry, 1999)</xref>.</p><list list-type="order"><list-item><p>Providing Reliability and Validity</p></list-item></list><p>Ulmann’s (2019) research identified a comprehensive reflective writing model that demonstrated reliability and validity in detecting reflection in students’ writing. The evaluation model’s quality was theoretically deemed reliable and valid for detecting reflection. While reliability and validity tests utilized a rule-based approach across various model categories, empirical validation was achieved in only one category. Evaluation detection performance revealed that the machine learning component reliably differentiated between reflective and descriptive sentences and effectively distinguished categories of sentences with or without elements such as experience, feelings, personal beliefs, awareness of difficulties, perspective, lessons learned, and intention. Automated Writing Evaluation (AWE) systems have been positively perceived for their reliable scoring methods <xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref>. <xref ref-type="bibr" rid="BIBR-25">(Mohsen &amp; Alshahrani, 2019)</xref>  revealed that My Access’s hybrid model enhanced writing accuracy through features like My Editor. These findings align with research by<xref ref-type="bibr" rid="BIBR-23">(Liu et al., 2018)</xref>, who utilized a model emphasizing both technical and personalistic aspects across three phases (analysis, description, and critique). Notably, the model’s high reliability in inter-rater agreement underscores its effectiveness. Additionally, the results affirm previous findings indicating that AWE is more reliable and consistent than human raters in identifying writing errors (<xref ref-type="bibr" rid="BIBR-14">(Hutchison, 2007)</xref>; <xref ref-type="bibr" rid="BIBR-33">(Shermis &amp; Hamner, 2013)</xref>).</p><list list-type="order"><list-item><p>Offering Effectiveness in Scoring and Giving Writing Feedback</p></list-item></list><p>Giving Writing Feedback AWE is often considered more effective than human scoring and feedback due to its accessibility at any time and its capacity to provide detailed content feedback. Its integration into writing classrooms has proven effective in fostering students' learning autonomy, critical thinking, and overall writing proficiency. Additionally, AWE serves as an efficient tool for sharing learning resources <xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref>. Students have shown preference for eight key characteristics of AEE, including its accessibility at any time, specificity, personalization, and comprehensibility compared to human rating systems <xref ref-type="bibr" rid="BIBR-43">(Wang, 2022)</xref>. 
<xref ref-type="bibr" rid="BIBR-48">(Zhang &amp; Hyland, 2018)</xref> also noted that 'AWE feedback offers discernible advantages over teacher feedback in terms of timeliness, convenience, multiple drafting opportunities, and even potential learner autonomy', corroborating previous findings by <xref ref-type="bibr" rid="BIBR-7">(Chen &amp; Cheng, 2008)</xref> and <xref ref-type="bibr" rid="BIBR-10">(Dikli, 2006)</xref>. The accessibility of feedback at any time and the ability to revise drafts multiple times align with earlier research <xref ref-type="bibr" rid="BIBR-9">(Cotos, 2015)</xref><xref ref-type="bibr" rid="BIBR-38">(Stevenson &amp; Phakiti, 2014)</xref><xref ref-type="bibr" rid="BIBR-44">(Warschauer &amp; Ware, 2006)</xref>.</p><list list-type="order"><list-item><p>Yielding Short-Term and Long-Term Effect on Students Learning</p></list-item></list><p>AWE has demonstrated both short-term and long-term impacts on learning and student performance, as indicated by <xref ref-type="bibr" rid="BIBR-22">(Link et al., 2022)</xref>. Specifically, students who received instruction using AWE alongside teacher feedback exhibited long-term retention of accuracy improvement by fostering skill development, greater writing proficiency, and continuous improvement through progress tracking, whereas those taught solely with teacher feedback showed short-term retention <xref ref-type="bibr" rid="BIBR-22">(Link et al., 2022)</xref> by providing immediate feedback and encouraging prompt revisions. Additionally, AWE has shown a positive long-term effect on reducing instances of run-on sentences across all proficiency levels, as well as improvements in subject-verb agreement, with varying degrees of change observed between intermediate-high and advancedlow levels <xref ref-type="bibr" rid="BIBR-18">(Li et al., 2017)</xref>., whereas longterm, it contributes to skill growth, ongoing proficiency, and sustained development by monitoring progress over time. These longterm effects are attributed to sustained advancements in accuracy, allowing students to internalize knowledge gained from AWE corrective feedback and retain it in their long-term memory for future use <xref ref-type="bibr" rid="BIBR-2">(Bitchener, 2012)</xref>.</p><p>The use of Automated Writing Evaluation (AWE) systems presents both advantages and limitations. Despite offering timely feedback, students often express skepticism due to the perceived disparity between AWE and human feedback. Studies show AWE's weaknesses in comparison to human raters, raising questions about its effectiveness in facilitating meaningful revisions and its impact on students' writing development.</p></sec><sec><title>c. Limitations of AWE</title><p>AWE also presents certain limitations, such as students’ skepticism towards the feedback provided (<xref ref-type="table" rid="table-5">Table 5</xref>). Skepticism often arises from the perceived gap between teacher feedback, which is seen as more influential due to its expert knowledge, personalized advice, and interactive nature, and AWE feedback, which lacks these strengths and is criticized for its inability to provide nuanced, authoritative guidance and to prompt thorough revisions <xref ref-type="bibr" rid="BIBR-15">(Koltovskaia, 2020)</xref>. Particularly, lower-level students have expressed frustration with receiving scores and feedback devoid of human interaction <xref ref-type="bibr" rid="BIBR-19">(Liao, 2016)</xref>. 
Additionally, students have shown a tendency to prioritize the quantity of feedback over the quality of revision suggestions. While the use of causal verbs decreased from the initial to the final draft, there was no corresponding increase in the use of causal nouns. Furthermore, Saricaoglu’s <xref ref-type="bibr" rid="BIBR-31">(Saricaoglu, 2019)</xref> findings did not indicate any long-term effects.</p><p>AWE also exhibits several weaknesses, notably in comparison to human raters. In a study by Liu and Kunnan <xref ref-type="bibr" rid="BIBR-24">(Liu &amp; Kunnan, 2016)</xref>, human raters outperformed AWE in rating students’ writing, assessing 326 essays compared to AWE’s 319; AWE thus failed to score 7 essays, and its accuracy was lower than that of human raters, detecting only 15 errors compared to the 22 identified by human raters. Additionally, a study of iWrite <xref ref-type="bibr" rid="BIBR-27">(Qian et al., 2020)</xref> reported no correlation between AWE and human scores, indicating poor automated scoring quality: AWE consistently yielded lower scores than human raters, and there was no correlation between the presented scores and feedback quality. These findings were echoed by Roscoe et al. <xref ref-type="bibr" rid="BIBR-29">(Roscoe et al., 2017)</xref>, who highlighted that scoring accuracy did not align with the initial expectations of AWE quality. Furthermore, prompt-specific AWE engines were found to be limited in their applicability, as they could only evaluate texts written in response to pre-trained prompts <xref ref-type="bibr" rid="BIBR-39">(Stevenson &amp; Phakiti, 2019)</xref>.</p>
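<p>For readers unfamiliar with how such machine-human score agreement is typically checked, the sketch below computes a Pearson correlation between two sets of essay scores; the numbers are invented for illustration and are not data from the studies reviewed here.</p><code language="python">
# Hypothetical sketch of how agreement between automated and human
# essay scores is commonly checked: Pearson correlation between the
# two score lists. The scores are invented for illustration.
from statistics import correlation  # available in Python 3.10+

human_scores = [3.0, 4.0, 2.5, 5.0, 3.5, 4.5]
machine_scores = [2.0, 2.5, 3.0, 2.5, 2.0, 3.0]

r = correlation(human_scores, machine_scores)
print(f"Pearson r = {r:.2f}")  # values near 0 indicate poor agreement
</code>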
rowspan="1" style="" align="left" valign="top"><xref ref-type="bibr" rid="BIBR-40">(Tian &amp; Zhou, 2020)</xref></td></tr></tbody></table></table-wrap><p>The research findings revealed that automated feedback primarily provided low-level feedback, with lexical meaning receiving a 66.2% rating and a mere 5.7% uptake rate, while grammar and mechanics garnered a 68.7% rating with a 46.4% uptake rate. When compared to peer and teacher feedback, automated feedback exhibited the lowest uptake rate, followed by peer feedback, with teacher feedback being the most authoritative among the three <xref ref-type="bibr" rid="BIBR-40">(Tian &amp; Zhou, 2020)</xref>. These findings corroborate previous research highlighting the detrimental effects of AWE when acting as a non-human audience <xref ref-type="bibr" rid="BIBR-37">(Stevenson, 2016)</xref>.</p></sec></sec><sec><title>4. Conclusion</title><p>This systematic literature review aimed to examine various AWE software used by researchers within five years in teaching writing. It also unveils student perceptions, as well as advantages and disadvantages of using the tools in writing. 14 kinds of AWE software were used in the reviewed articles. The findings showed that AWE tools are generally perceived positively by students, particularly for scoring, enhancing writing skills, and promoting engagement, which encouraged more thorough revision habits. Given the findings, some benefits are obtained, including enhancing language precision and writing performance, providing effective scoring and writing feedback, developing short-term and long-term impact on student learning, and providing reliable and valid feedback. However, some drawbacks were noted, such as distrust in AWE feedback and a preference for human evaluators.</p><p>The review's limited scope suggests that future research should encompass broader contexts and longer durations. Additionally, future researchers should explore the integration of AWE with other educational technologies, assess the long-term impact on students' writing skills beyond higher education, and investigate the effectiveness of AWE in diverse educational settings and with varied learner demographics. Comparative studies between AWE and human feedback, as well as the development of more advanced and context-sensitive AWE systems, could also provide valuable insights. 
Furthermore, qualitative studies on student and instructor attitudes towards AWE could enrich understanding of its practical applications and limitations.</p></sec></body><back><ref-list><title>References</title><ref id="BIBR-1"><element-citation publication-type="article-journal"><article-title>Leveraging technology to improve learning independence in chemistry: A study on Moodle integration</article-title><source>Indonesian Journal on Learning and Advanced Education (IJOLAE)</source><volume>6</volume><issue>3</issue><person-group person-group-type="author"><name><surname>Abidin</surname><given-names>N.L.F.</given-names></name><name><surname>Dwiningsih</surname><given-names>K.</given-names></name><name><surname>Jehwae</surname><given-names>P.</given-names></name><name><surname>Sari</surname><given-names>C.K.</given-names></name></person-group><year>2024</year><fpage>365</fpage><lpage>386</lpage><page-range>365-386</page-range></element-citation></ref><ref id="BIBR-2"><element-citation publication-type="article-journal"><article-title>A Reflection on ‘the Language Learning Potential’ of Written CF</article-title><source>Journal of Second Language Writing</source><volume>21</volume><issue>4</issue><person-group person-group-type="author"><name><surname>Bitchener</surname><given-names>John</given-names></name></person-group><year>2012</year><fpage>348</fpage><lpage>63</lpage><page-range>348-63</page-range><pub-id pub-id-type="doi">10.1016/j.jslw.2012.09.006</pub-id></element-citation></ref><ref id="BIBR-3"><element-citation publication-type="article-journal"><article-title>Design and Evaluation of Automated Writing Evaluation Models: Relationships with Writing in Naturalistic Settings</article-title><source>Assessing Writing</source><volume>34</volume><person-group person-group-type="author"><name><surname>Bridgeman</surname><given-names>Brent</given-names></name><name><surname>Ramineni</surname><given-names>Chaitanya</given-names></name></person-group><year>2017</year><fpage>62</fpage><lpage>71</lpage><page-range>62-71</page-range><pub-id pub-id-type="doi">10.1016/j.asw.2017.10.001</pub-id></element-citation></ref><ref id="BIBR-4"><element-citation publication-type="article-journal"><article-title>Comparison of Human and Machine Scoring of Essays: Differences by Gender, Ethnicity, and Country</article-title><source>Applied Measurement in Education</source><volume>25</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Bridgeman</surname><given-names>Brent</given-names></name><name><surname>Trapani</surname><given-names>Catherine</given-names></name><name><surname>Attali</surname><given-names>Yigal</given-names></name></person-group><year>2012</year><fpage>27</fpage><lpage>40</lpage><page-range>27-40</page-range><pub-id pub-id-type="doi">10.1080/08957347.2012.635502</pub-id></element-citation></ref><ref id="BIBR-5"><element-citation publication-type="article-journal"><article-title>Research Questions for a CALL Research Agenda: A Reply to Rafael Salaberry</article-title><source>Language Learning &amp; Technology</source><volume>3</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Chapelle</surname><given-names>C.</given-names></name></person-group><year>1999</year><fpage>108</fpage><lpage>13</lpage><page-range>108-13</page-range></element-citation></ref><ref id="BIBR-6"><element-citation publication-type="article-journal"><article-title>20 Years of Technology and Language Assessment in Language Learning &amp; 
Technology</article-title><source>Language Learning and Technology</source><volume>20</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Chapelle</surname><given-names>Carol A.</given-names></name><name><surname>Voss</surname><given-names>Erik</given-names></name></person-group><year>2016</year><fpage>116</fpage><lpage>28</lpage><page-range>116-28</page-range></element-citation></ref><ref id="BIBR-7"><element-citation publication-type="article-journal"><article-title>Beyond the Design of Automated Writing Evaluation: Pedagogical Practices and Perceived Learning Effectiveness in EFL Writing Classes</article-title><source>Language Learning and Technology</source><volume>12</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Chen</surname><given-names>Chi Fen Emily</given-names></name><name><surname>Cheng</surname><given-names>Wei Yuan Eugene</given-names></name></person-group><year>2008</year><fpage>94</fpage><lpage>112</lpage><page-range>94-112</page-range></element-citation></ref><ref id="BIBR-8"><element-citation publication-type="article-journal"><article-title>The Impact of Online Automated Feedback on Students’ Reflective Journal Writing in an EFL Course</article-title><source>The Internet and Higher Education</source><volume>34</volume><person-group person-group-type="author"><name><surname>Cheng</surname><given-names>Gary</given-names></name></person-group><year>2017</year><fpage>18</fpage><lpage>27</lpage><page-range>18-27</page-range><pub-id pub-id-type="doi">10.1016/j.iheduc.2017.04.002</pub-id></element-citation></ref><ref id="BIBR-9"><element-citation publication-type="article-journal"><article-title>AWE for Writing Pedagogy: From Healthy Tension to Tangible Prospects</article-title><source>Writing &amp; Pedagogy</source><volume>7</volume><issue>2–3</issue><person-group person-group-type="author"><name><surname>Cotos</surname><given-names>E.</given-names></name></person-group><year>2015</year><fpage>197</fpage><lpage>231</lpage><page-range>197-231</page-range></element-citation></ref><ref id="BIBR-10"><element-citation publication-type="article-journal"><article-title>An Overview of Automated Scoring of Essays</article-title><source>The Journal of Technology, Learning and Assessment</source><volume>5</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Dikli</surname><given-names>Semire</given-names></name></person-group><year>2006</year></element-citation></ref><ref id="BIBR-11"><element-citation publication-type="article-journal"><article-title>Automated Essay Scoring Feedback for Second Language Writers: How Does It Compare to Instructor Feedback?</article-title><source>Assessing Writing</source><volume>22</volume><person-group person-group-type="author"><name><surname>Dikli</surname><given-names>Semire</given-names></name><name><surname>Bleyle</surname><given-names>Susan</given-names></name></person-group><year>2014</year><fpage>1</fpage><lpage>17</lpage><page-range>1-17</page-range><pub-id pub-id-type="doi">10.1016/j.asw.2014.03.006</pub-id></element-citation></ref><ref id="BIBR-12"><element-citation publication-type="book"><article-title>The Measurement of Writing Ability</article-title><person-group 
person-group-type="author"><name><surname>Godshalk</surname><given-names>F.</given-names></name><name><surname>Swineford</surname><given-names>F.</given-names></name><name><surname>Coffman</surname><given-names>W.</given-names></name><etal/></person-group><year>1966</year><publisher-name>College Entrance Examination Board</publisher-name><publisher-loc>New York</publisher-loc></element-citation></ref><ref id="BIBR-13"><element-citation publication-type="article-journal"><article-title>Formative Assessment and Writing: A Meta-Analysis</article-title><source>The Elementary School Journal</source><volume>115</volume><issue>4</issue><person-group person-group-type="author"><name><surname>Graham</surname><given-names>Steve</given-names></name><name><surname>Hebert</surname><given-names>Michael</given-names></name><name><surname>Harris</surname><given-names>Karen R.</given-names></name></person-group><year>2015</year><fpage>523</fpage><lpage>47</lpage><page-range>523-47</page-range></element-citation></ref><ref id="BIBR-14"><element-citation publication-type="article-journal"><article-title>An Evaluation of Computerised Essay Marking for National Curriculum Assessment in the UK for 11‐year‐olds</article-title><source>British Journal of Educational Technology</source><volume>38</volume><issue>6</issue><person-group person-group-type="author"><name><surname>Hutchison</surname><given-names>Dougal</given-names></name></person-group><year>2007</year><fpage>977</fpage><lpage>89</lpage><page-range>977-89</page-range><pub-id pub-id-type="doi">10.1111/j.1467-8535.2006.00686.x</pub-id></element-citation></ref><ref id="BIBR-15"><element-citation publication-type="article-journal"><article-title>Student Engagement with Automated Written Corrective Feedback (AWCF) Provided by Grammarly: A Multiple Case Study</article-title><source>Assessing Writing</source><volume>44</volume><issue>100450</issue><person-group person-group-type="author"><name><surname>Koltovskaia</surname><given-names>Svetlana</given-names></name></person-group><year>2020</year><pub-id pub-id-type="doi">10.1016/j.asw.2020.100450</pub-id></element-citation></ref><ref id="BIBR-16"><element-citation publication-type="article-journal"><article-title>Introducing Standardized EFL/ESL Exams</article-title><source>Language Learning and Technology</source><volume>11</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Laborda</surname><given-names>Jesus Garcia</given-names></name></person-group><year>2007</year><fpage>3</fpage><lpage>9</lpage><page-range>3-9</page-range></element-citation></ref><ref id="BIBR-17"><element-citation publication-type="article-journal"><article-title>Formative Computer-Based Feedback in the University Classroom: Specific Concept Maps Scaffold Students’ Writing</article-title><source>Computers in Human Behavior</source><volume>72</volume><person-group person-group-type="author"><name><surname>Lachner</surname><given-names>Andreas</given-names></name><name><surname>Burkhart</surname><given-names>Christian</given-names></name><name><surname>Nückles</surname><given-names>Matthias</given-names></name></person-group><year>2017</year><fpage>459</fpage><lpage>69</lpage><page-range>459-69</page-range><pub-id pub-id-type="doi">10.1016/j.chb.2017.03.008</pub-id></element-citation></ref><ref id="BIBR-18"><element-citation publication-type="article-journal"><article-title>The Short-Term and Long-Term Effects of AWE Feedback on ESL Students’ Development of Grammatical Accuracy</article-title><source>CALICO 
Journal</source><volume>34</volume><issue>3</issue><person-group person-group-type="author"><name><surname>Li</surname><given-names>Zhi</given-names></name><name><surname>Feng</surname><given-names>Hui-Hsien</given-names></name><name><surname>Saricaoglu</surname><given-names>Aysel</given-names></name></person-group><year>2017</year><fpage>355</fpage><lpage>75</lpage><page-range>355-75</page-range></element-citation></ref><ref id="BIBR-19"><element-citation publication-type="article-journal"><article-title>Enhancing the Grammatical Accuracy of EFL Writing by Using an AWE-Assisted Process Approach</article-title><source>System</source><volume>62</volume><person-group person-group-type="author"><name><surname>Liao</surname><given-names>Hui-Chuan</given-names></name></person-group><year>2016</year><fpage>77</fpage><lpage>92</lpage><page-range>77-92</page-range><pub-id pub-id-type="doi">10.1016/j.system.2016.02.007</pub-id></element-citation></ref><ref id="BIBR-20"><element-citation publication-type="article-journal"><article-title>Using Automated Writing Evaluation to Reduce Grammar Errors in Writing</article-title><source>ELT Journal</source><volume>70</volume><issue>3</issue><person-group person-group-type="author"><name><surname>Liao</surname><given-names>Hui-Chuan</given-names></name></person-group><year>2016</year><fpage>308</fpage><lpage>19</lpage><page-range>308-19</page-range><pub-id pub-id-type="doi">10.1093/elt/ccv058</pub-id></element-citation></ref><ref id="BIBR-21"><element-citation publication-type="article-journal"><article-title>Teaching Writing with Language Feedback Technology</article-title><source>Computers and Composition</source><volume>54</volume><issue>102518</issue><person-group person-group-type="author"><name><surname>Lim</surname><given-names>Fei Victor</given-names></name><name><surname>Phua</surname><given-names>Jean</given-names></name></person-group><year>2019</year><pub-id pub-id-type="doi">10.1016/j.compcom.2019.102518</pub-id></element-citation></ref><ref id="BIBR-22"><element-citation publication-type="article-journal"><article-title>Impact of Automated Writing Evaluation on Teacher Feedback, Student Revision, and Writing Improvement</article-title><source>Computer Assisted Language Learning</source><volume>35</volume><issue>4</issue><person-group person-group-type="author"><name><surname>Link</surname><given-names>Stephanie</given-names></name><name><surname>Mehrzad</surname><given-names>Mohaddeseh</given-names></name><name><surname>Rahimi</surname><given-names>Mohammad</given-names></name></person-group><year>2022</year><fpage>605</fpage><lpage>34</lpage><page-range>605-34</page-range><pub-id pub-id-type="doi">10.1080/09588221.2020.1743323</pub-id></element-citation></ref><ref id="BIBR-23"><element-citation publication-type="article-journal"><article-title>Mining Online Discussion Data for Understanding Teachers’ Reflective Thinking</article-title><source>IEEE Transactions on Learning Technologies</source><volume>11</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Liu</surname><given-names>Q.</given-names></name><name><surname>Zhang</surname><given-names>S.</given-names></name><name><surname>Wang</surname><given-names>Q.</given-names></name><name><surname>Chen</surname><given-names>W.</given-names></name></person-group><year>2018</year><fpage>243</fpage><lpage>54</lpage><page-range>243-54</page-range><pub-id pub-id-type="doi">10.1109/TLT.2017.2708115</pub-id></element-citation></ref><ref id="BIBR-24"><element-citation 
publication-type="article-journal"><article-title>Investigating the Application of Automated Writing Evaluation to Chinese Undergraduate English Majors: A Case Study of WriteToLearn</article-title><source>CALICO Journal</source><volume>33</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Liu</surname><given-names>Sha</given-names></name><name><surname>Kunnan</surname><given-names>Antony John</given-names></name></person-group><year>2016</year><fpage>71</fpage><lpage>91</lpage><page-range>71-91</page-range></element-citation></ref><ref id="BIBR-25"><element-citation publication-type="article-journal"><article-title>The Effectiveness of Using a Hybrid Mode of Automated Writing Evaluation System on Efl Students’ Writing</article-title><source>Teaching English with Technology</source><volume>19</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Mohsen</surname><given-names>Mohammed Ali</given-names></name><name><surname>Alshahrani</surname><given-names>Abdulaziz</given-names></name></person-group><year>2019</year><fpage>118</fpage><lpage>31</lpage><page-range>118-31</page-range></element-citation></ref><ref id="BIBR-26"><element-citation publication-type="article-journal"><article-title>Secondary School Teachers’ Accessibility to Internet Facilities for Advanced Instruction in Nigeria</article-title><source>Indonesian Journal on Learning and Advanced Education (IJOLAE</source><volume>3</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Onojah</surname><given-names>A.A.</given-names></name><name><surname>Onojah</surname><given-names>A.O.</given-names></name><name><surname>Olumorin</surname><given-names>C.O.</given-names></name><name><surname>Omosewo</surname><given-names>E.O.</given-names></name></person-group><year>2021</year><fpage>86</fpage><lpage>95</lpage><page-range>86-95</page-range></element-citation></ref><ref id="BIBR-27"><element-citation publication-type="article-journal"><article-title>Evaluating China’s Automated Essay Scoring System IWrite</article-title><source>Journal of Educational Computing Research</source><volume>58</volume><issue>4</issue><person-group person-group-type="author"><name><surname>Qian</surname><given-names>Leyi</given-names></name><name><surname>Zhao</surname><given-names>Yali</given-names></name><name><surname>Cheng</surname><given-names>Yan</given-names></name></person-group><year>2020</year><fpage>771</fpage><lpage>90</lpage><page-range>771-90</page-range><pub-id pub-id-type="doi">10.1177/0735633119881472</pub-id></element-citation></ref><ref id="BIBR-28"><element-citation publication-type="article-journal"><article-title>Automated Writing Evaluation for Formative Assessment of Second Language Writing: Investigating the Accuracy and Usefulness of Feedback as Part of Argument-Based Validation</article-title><source>Educational Psychology</source><volume>37</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Ranalli</surname><given-names>Jim</given-names></name><name><surname>Link</surname><given-names>Stephanie</given-names></name><name><surname>Chukharev-Hudilainen</surname><given-names>Evgeny</given-names></name></person-group><year>2017</year><fpage>8</fpage><lpage>25</lpage><page-range>8-25</page-range><pub-id pub-id-type="doi">10.1080/01443410.2015.1136407</pub-id></element-citation></ref><ref id="BIBR-29"><element-citation publication-type="article-journal"><article-title>Presentation, Expectations, and Experience: Sources of Student 
Perceptions of Automated Writing Evaluation</article-title><source>Computers in Human Behavior</source><volume>70</volume><person-group person-group-type="author"><name><surname>Roscoe</surname><given-names>Rod D.</given-names></name><name><surname>Wilson</surname><given-names>Joshua</given-names></name><name><surname>Johnson</surname><given-names>Adam C.</given-names></name><name><surname>Mayra</surname><given-names>Christopher R.</given-names></name></person-group><year>2017</year><fpage>207</fpage><lpage>21</lpage><page-range>207-21</page-range><pub-id pub-id-type="doi">10.1016/j.chb.2016.12.076</pub-id></element-citation></ref><ref id="BIBR-30"><element-citation publication-type="article-journal"><article-title>CALL in the Year 2000: Still Developing the Research Agenda</article-title><source>Language Learning &amp; Technology</source><volume>3</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Salaberry</surname><given-names>Rafael</given-names></name></person-group><year>1999</year><fpage>104</fpage><lpage>7</lpage><page-range>104-7</page-range></element-citation></ref><ref id="BIBR-31"><element-citation publication-type="article-journal"><article-title>The Impact of Automated Feedback on L2 Learners’ Written Causal Explanations</article-title><source>ReCALL</source><volume>31</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Saricaoglu</surname><given-names>Aysel</given-names></name></person-group><year>2019</year><fpage>189</fpage><lpage>203</lpage><page-range>189-203</page-range><pub-id pub-id-type="doi">10.1017/S095834401800006X</pub-id></element-citation></ref><ref id="BIBR-32"><element-citation publication-type="article-journal"><article-title>State-of-the-Art Automated Essay Scoring: Competition, Results, and Future Directions from a United States Demonstration</article-title><source>Assessing Writing</source><volume>20</volume><person-group person-group-type="author"><name><surname>Shermis</surname><given-names>Mark D.</given-names></name></person-group><year>2014</year><fpage>53</fpage><lpage>76</lpage><page-range>53-76</page-range><pub-id pub-id-type="doi">10.1016/j.asw.2013.04.001</pub-id></element-citation></ref><ref id="BIBR-33"><element-citation publication-type="chapter"><article-title>Contrasting State-of-the-Art Automated Scoring of Essays</article-title><source>Handbook of automated essay evaluation. 
Routledge</source><person-group person-group-type="author"><name><surname>Shermis</surname><given-names>Mark D.</given-names></name><name><surname>Hamner</surname><given-names>Ben</given-names></name></person-group><year>2013</year><fpage>313</fpage><lpage>46</lpage><page-range>313-46</page-range></element-citation></ref><ref id="BIBR-34"><element-citation publication-type="article-journal"><article-title>Scaffolding Assignments: Analysis of AssignMentor as a Tool to Support First Year Students’ Academic Writing Skills</article-title><source>E-Learning and Digital Media</source><volume>14</volume><issue>1–2</issue><person-group person-group-type="author"><name><surname>Silva</surname><given-names>Pedro</given-names></name></person-group><year>2017</year><fpage>86</fpage><lpage>97</lpage><page-range>86-97</page-range><pub-id pub-id-type="doi">10.1177/2042753017695652</pub-id></element-citation></ref><ref id="BIBR-35"><element-citation publication-type="article-journal"><article-title>Education Application Testing Perspective to Empower Students’ Higher Order Thinking Skills Related to the Concept of Adaptive Learning Media</article-title><source>Indonesian Journal on Learning and Advanced Education (IJOLAE)</source><volume>4</volume><issue>3</issue><person-group person-group-type="author"><name><surname>Sulistyanto</surname><given-names>H.</given-names></name><name><surname>Anif</surname><given-names>S.</given-names></name><name><surname>Sutama</surname><given-names>S.</given-names></name><name><surname>Narimo</surname><given-names>S.</given-names></name><name><surname>Sutopo</surname><given-names>A.</given-names></name><name><surname>Haq</surname><given-names>M.I.</given-names></name><name><surname>Nasir</surname><given-names>G.A.</given-names></name></person-group><year>2022</year><fpage>257</fpage><lpage>271</lpage><page-range>257-271</page-range></element-citation></ref><ref id="BIBR-36"><element-citation publication-type="article-journal"><article-title>Impact of Adaptive Educational Game Applications on Improving Student Learning: Efforts to Introduce Nusantara Culture in Indonesia</article-title><source>Indonesian Journal on Learning and Advanced Education (IJOLAE)</source><volume>5</volume><issue>3</issue><person-group person-group-type="author"><name><surname>Sulistyanto</surname><given-names>H.</given-names></name><name><surname>Sumardjoko</surname><given-names>B.</given-names></name><name><surname>Haq</surname><given-names>M.I.</given-names></name><name><surname>Zakaria</surname><given-names>G.A.N.</given-names></name><name><surname>Narimo</surname><given-names>S.</given-names></name><name><surname>Astuti</surname><given-names>D.</given-names></name><name><surname>Adhantoro</surname><given-names>M.S.</given-names></name><name><surname>Setyabudi</surname><given-names>D.P.</given-names></name><name><surname>Sidiq</surname><given-names>Y.</given-names></name><name><surname>Ishartono</surname><given-names>N.</given-names></name></person-group><year>2023</year><fpage>249</fpage><lpage>261</lpage><page-range>249-261</page-range><pub-id pub-id-type="doi">10.23917/ijolae.v5i3.23004</pub-id></element-citation></ref><ref id="BIBR-37"><element-citation publication-type="article-journal"><article-title>A Critical Interpretative Synthesis: The Integration of Automated Writing Evaluation into Classroom Writing Instruction</article-title><source>Computers and Composition</source><volume>42</volume><person-group 
person-group-type="author"><name><surname>Stevenson</surname><given-names>Marie</given-names></name></person-group><year>2016</year><fpage>1</fpage><lpage>16</lpage><page-range>1-16</page-range><pub-id pub-id-type="doi">10.1016/j.compcom.2016.05.001</pub-id></element-citation></ref><ref id="BIBR-38"><element-citation publication-type="article-journal"><article-title>The Effects of Computer-Generated Feedback on the Quality of Writing</article-title><source>Assessing Writing</source><volume>19</volume><person-group person-group-type="author"><name><surname>Stevenson</surname><given-names>Marie</given-names></name><name><surname>Phakiti</surname><given-names>Aek</given-names></name></person-group><year>2014</year><fpage>51</fpage><lpage>65</lpage><page-range>51-65</page-range><pub-id pub-id-type="doi">10.1016/j.asw.2013.11.007</pub-id></element-citation></ref><ref id="BIBR-39"><element-citation publication-type="chapter"><article-title>Automated Feedback and Second Language Writing</article-title><source>Feedback in second language writing: Contexts and issues</source><person-group person-group-type="author"><name><surname>Stevenson</surname><given-names>Marie</given-names></name><name><surname>Phakiti</surname><given-names>Aek</given-names></name></person-group><year>2019</year><fpage>125</fpage><lpage>42</lpage><page-range>125-42</page-range></element-citation></ref><ref id="BIBR-40"><element-citation publication-type="article-journal"><article-title>Learner Engagement with Automated Feedback, Peer Feedback and Teacher Feedback in an Online EFL Writing Context</article-title><source>System</source><volume>91</volume><issue>102247</issue><person-group person-group-type="author"><name><surname>Tian</surname><given-names>Lili</given-names></name><name><surname>Zhou</surname><given-names>Yu</given-names></name></person-group><year>2020</year><pub-id pub-id-type="doi">10.1016/j.system.2020.102247</pub-id></element-citation></ref><ref id="BIBR-41"><element-citation publication-type="article-journal"><article-title>Automated Analysis of Reflection in Writing: Validating Machine Learning Approaches</article-title><source>International Journal of Artificial Intelligence in Education</source><volume>29</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Ullmann</surname><given-names>Thomas Daniel</given-names></name></person-group><year>2019</year><fpage>217</fpage><lpage>57</lpage><page-range>217-57</page-range><pub-id pub-id-type="doi">10.1007/s40593-019-00174-2</pub-id></element-citation></ref><ref id="BIBR-42"><element-citation publication-type="article-journal"><article-title>ERevis(Ing): Students’ Revision of Text Evidence Use in an Automated Writing Evaluation System</article-title><source>Assessing Writing</source><volume>44</volume><issue>100449</issue><person-group person-group-type="author"><name><surname>Wang</surname><given-names>Elaine Lin</given-names></name><name><surname>Matsumura</surname><given-names>Lindsay Clare</given-names></name><name><surname>Correnti</surname><given-names>Richard</given-names></name><name><surname>Litman</surname><given-names>Diane</given-names></name><name><surname>Zhang</surname><given-names>Haoran</given-names></name><name><surname>Howe</surname><given-names>Emily</given-names></name><name><surname>Magooda</surname><given-names>Ahmed</given-names></name><name><surname>Quintana</surname><given-names>Rafael</given-names></name></person-group><year>2020</year><pub-id 
pub-id-type="doi">10.1016/j.asw.2020.100449</pub-id></element-citation></ref><ref id="BIBR-43"><element-citation publication-type="article-journal"><article-title>Computer-Assisted EFL Writing and Evaluations Based on Artificial Intelligence: A Case from a College Reading and Writing Course</article-title><source>Library Hi Tech</source><volume>40</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Wang</surname><given-names>Zhijie</given-names></name></person-group><year>2022</year><fpage>80</fpage><lpage>97</lpage><page-range>80-97</page-range><pub-id pub-id-type="doi">10.1108/LHT-05-2020-0113</pub-id></element-citation></ref><ref id="BIBR-44"><element-citation publication-type="article-journal"><article-title>Automated Writing Evaluation: Defining the Classroom Research Agenda</article-title><source>Language Teaching Research</source><volume>10</volume><issue>2</issue><person-group person-group-type="author"><name><surname>Warschauer</surname><given-names>Mark</given-names></name><name><surname>Ware</surname><given-names>Paige</given-names></name></person-group><year>2006</year><fpage>157</fpage><lpage>80</lpage><page-range>157-80</page-range><pub-id pub-id-type="doi">10.1191/1362168806lr190oa</pub-id></element-citation></ref><ref id="BIBR-45"><element-citation publication-type="article-journal"><article-title>Automated Essay Evaluation Software in English Language Arts Classrooms: Effects on Teacher Feedback, Student Motivation, and Writing Quality</article-title><source>Computers &amp; Education</source><volume>100</volume><person-group person-group-type="author"><name><surname>Wilson</surname><given-names>Joshua</given-names></name><name><surname>Czik</surname><given-names>Amanda</given-names></name></person-group><year>2016</year><fpage>94</fpage><lpage>109</lpage><page-range>94-109</page-range><pub-id pub-id-type="doi">10.1016/j.compedu.2016.05.004</pub-id></element-citation></ref><ref id="BIBR-46"><element-citation publication-type="article-journal"><article-title>Uncovering Rater’s Cognitive Processing and Focus Using Think-Aloud Protocols</article-title><source>Journal of Writing Assessment</source><volume>2</volume><issue>1</issue><person-group person-group-type="author"><name><surname>Wolfe</surname><given-names>E.M.</given-names></name></person-group><year>2005</year><fpage>37</fpage><lpage>56</lpage><page-range>37-56</page-range></element-citation></ref><ref id="BIBR-47"><element-citation publication-type="article-journal"><article-title>Engaging with Automated Writing Evaluation (AWE) Feedback on L2 Writing: Student Perceptions and Revisions</article-title><source>Assessing Writing</source><volume>43</volume><issue>100439</issue><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Zhe</given-names></name></person-group><year>2020</year><pub-id pub-id-type="doi">10.1016/j.asw.2019.100439</pub-id></element-citation></ref><ref id="BIBR-48"><element-citation publication-type="article-journal"><article-title>Student Engagement with Teacher and Automated Feedback on L2 Writing</article-title><source>Assessing Writing</source><volume>36</volume><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Zhe</given-names></name><name><surname>Hyland</surname><given-names>Ken</given-names></name></person-group><year>2018</year><fpage>90</fpage><lpage>102</lpage><page-range>90-102</page-range><pub-id 
pub-id-type="doi">10.1016/j.asw.2018.02.004</pub-id></element-citation></ref></ref-list></back></article>
