<?xml version="1.0" encoding="US-ASCII"?>

<!DOCTYPE rfc SYSTEM "rfc2629.dtd" [
<!ENTITY id.draft-irtf-nmrg-ai-challenges SYSTEM "http://xml.resource.org/public/rfc/bibxml3/reference.I-D.irtf-nmrg-ai-challenges.xml">]>

<rfc category="info" docName="draft-oh-nmrg-ai-adp-01" ipr="trust200902">
    
  <?xml-stylesheet type='text/xsl' href='rfc2629.xslt' ?>
  <!-- used by XSLT processors -->
  <!-- For a complete list and description of processing instructions (PIs),
   please see http://xml.resource.org/authoring/README.html. -->
  <!-- Below are generally applicable Processing Instructions (PIs) that most I-Ds might want to use.
   (Here they are set differently than their defaults in xml2rfc v1.32) -->
  <?rfc strict="yes" ?>
  <!-- give errors regarding ID-nits and DTD validation -->
  <!-- control the table of contents (ToC) -->
  <?rfc toc="yes"?>
  <!-- generate a ToC -->
  <?rfc tocdepth="4"?>
  <!-- the number of levels of subsections in ToC. default: 3 -->
  <!-- control references -->
  <?rfc symrefs="yes"?>
  <!-- use symbolic references tags, i.e, [RFC2119] instead of [1] -->
  <?rfc sortrefs="no" ?>
  <!-- do not sort the reference entries alphabetically (they appear in citation order) -->
  <!-- control vertical white space
   (using these PIs as follows is recommended by the RFC Editor) -->
  <?rfc compact="no" ?>
  <!-- do not start each main section on a new page -->
  <?rfc subcompact="no" ?>
  <!-- keep one blank line between list items -->
  <!-- end of list of popular I-D processing instructions -->

  <!-- ***** FRONT MATTER ***** -->
  <front>
      <!-- The abbreviated title is used in the page header - it is only necessary if the
       full title is longer than 39 characters -->

    <title abbrev="Automating Distributed Processing">AI-Based Distributed Processing Automation in Digital Twin Network</title>

    <!-- add 'role="editor"' below for the editors if appropriate -->
    <!-- Another author who claims to be an editor -->
    <author fullname="SeokBeom Oh" initials="S-B" surname="Oh">
        <organization>KSA</organization>

        <address>
            <postal>
                <street>Digital Transformation Center, 5</street>
                <street>Teheran-ro 69-gil, Gangnamgu</street>
                <city>Seoul</city>
                <region></region>
                <code>06160</code>
                <country>South Korea</country>
            </postal>
            <phone>+82 2 1670 6009</phone>
            <email>isb6655@korea.ac.kr</email>
        </address>
    </author>

    <author fullname="Yong-Geun Hong" initials="Y-G" surname="Hong">
        <organization>Daejeon University</organization>
        <address>
            <postal>
                <street>62 Daehak-ro, Dong-gu</street>
                <street></street>
                <city>Daejeon</city>
                <region></region>
                <code>34520</code>
                <country>South Korea</country>
            </postal>         
            <phone>+82 42 280 4841</phone>
            <email>yonggeun.hong@gmail.com</email>
            <!-- uri and facsimile elements may also be added -->
        </address>
    </author>

   <author fullname="Joo-Sang Youn" initials="J-S" surname="Youn">
        <organization>DONG-EUI University</organization>
      	<address>
        	<postal>
	          	<street>176 Eomgwangno Busan_jin_gu</street>
    	      	<city>Busan</city>
        	  	<region></region>
	       		<code>614-714</code>
    	      	<country>South Korea</country>
        	</postal>
        	<phone>+82 51 890 1993</phone>
        	<email>joosang.youn@gmail.com</email>
        	<!-- uri and facsimile elements may also be added -->
      	</address>
    </author>

    <author fullname="Hyunjeong Lee" initials="H-J" surname="Lee">
        <organization abbrev="ETRI">Electronics and Telecommunications Research Institute</organization>
        <address>
            <postal>
                <street>218 Gajeong-ro, Yuseong-gu</street>
                <city>Daejeon</city>
                <region></region>
                <code>34129</code>
                <country>South Korea</country>
            </postal>         
            <phone>+82 42 860 1213</phone>
            <email>hjlee294@etri.re.kr</email>
        </address>
    </author>

   <author fullname="Hyun-Kook Kahng" initials="H-K" surname="Kahng">
        <organization>Korea University</organization>

        <address>
            <postal>
                <street>2511 Sejong-ro</street>
                <city>Sejong City</city>
                <region></region>
                <code>30019</code>
                <country>South Korea</country>
            </postal>
            <email>kahng@korea.ac.kr</email>
        </address>
    </author>

    <date day="23" month="October" year="2023" />
    <!-- If the month and year are both specified and are the current ones, xml2rfc will fill
     in the current day for you. If only the current year is specified, xml2rfc will fill
	 in the current day and month for you. If the year is not the current one, it is
	 necessary to specify at least a month (xml2rfc assumes day="1" if not specified for the
	 purpose of calculating the expiry date).  With drafts it is normally sufficient to
	 specify just the year. -->
    
    <!-- Meta-data Declarations -->
    
    <workgroup>Internet Research Task Force</workgroup>
    
    <!-- WG name at the upperleft corner of the doc,
     IETF is fine for individual submissions.
	 If this element is not present, the default is "Network Working Group",
     which is used by the RFC Editor as a nod to the history of the IETF. -->
    
    <keyword>Internet Draft</keyword>
    
    <!-- Keywords will be incorporated into HTML output
     files in a meta tag but they have no effect on text or nroff
     output. If you submit your draft to the RFC Editor, the
     keywords will be used for the search engine. -->

<!-- Abstract section -->
    <abstract>
      <t>This document discusses the use of AI technology and digital twin technology to automate the management of computer network resources distributed across different locations. Digital twin technology involves creating a virtual model of real-world physical objects or processes, which is utilized to analyze and optimize complex systems. In a digital twin network, AI-based network management by automating distributed processing involves utilizing deep learning algorithms to analyze network traffic, identify potential issues, and take proactive measures to prevent or mitigate those issues. Network administrators can efficiently manage and optimize their networks, thereby improving network performance and reliability. AI-based network management, utilizing digital twin network technology, also aids in optimizing network performance by identifying bottlenecks in the network and automatically adjusting network settings to enhance throughput and reduce latency. By implementing AI-based network management through automated distributed processing, organizations can improve network performance, and reduce the need for manual network management tasks.
</t>
    </abstract>
  </front>

  <middle>

<!-- Section 1 - Introduction -->
    <section title="Introduction">
		<t>Due to industrial digitalization, the number of devices connected to the network is increasing rapidly. As the number of devices increases, the amount of data that needs to be processed in the network is increasing due to the interconnection between various devices.</t>

    <t>Existing networks were managed manually by administrators/operators, but network management has become complicated and the possibility of network malfunction has increased, which can cause serious damage.</t>

    <t>Therefore, this document considers the configuration of systems using both digital twin technology and artificial intelligence (AI) technology for network management and operation, in order to adapt to the dynamically changing network environment. In this regard, AI technologies play a key role by maximizing the utilization of network resources. They achieve this by providing resource access control and optimal task distribution processing based on the characteristics of nodes that offer network functions for network management automation and operation<xref target="I-D.irtf-nmrg-ai-challenges"></xref>.</t>

 	  </section>

<!-- Section 2  -->
    <section title="Conventional Task Distributed Processing Techniques and Problems">

<!-- Section 2.1 -->
   	<section title="Challenges and Alternatives in Task Distributed Processing">

    <t>Conventional Task Distributed Processing Techniques refer to methods and approaches used to distribute computational tasks among multiple nodes in a network. These techniques are typically used in distributed computing environments to improve the efficiency and speed of processing large volumes of data.</t>

    <t>Some common conventional techniques used in task distributed processing include load balancing, parallel processing, and pipelining. Load balancing involves distributing tasks across multiple nodes in a way that minimizes the overall workload of each node, while parallel processing involves dividing a single task into multiple sub-tasks that can be processed simultaneously. Pipelining involves breaking a task into smaller stages, with each stage being processed by a different node.</t>

    <t>However, conventional task distributed processing techniques also face several challenges and problems. One of the main challenges is ensuring that tasks are distributed evenly among nodes, so that no single node is overburdened while others remain idle. Another challenge is managing the communication between nodes, as this can often be a bottleneck that slows down overall processing speed. Additionally, fault tolerance and reliability can be problematic, as a single node failure can disrupt the entire processing workflow.</t>

    <t>To address these challenges, new techniques such as edge computing and distributed deep learning are being developed and used in modern distributed computing environments. The optimal resource must be allocated according to the characteristics of the node that provides the network function. Cloud servers generally have more powerful performance. However, to transfer data from the local machine to the cloud, it is necessary to move across multiple access networks, and this incurs high latency and energy consumption because a large number of packets must be processed and delivered. The MEC server is less powerful and less efficient than the cloud server, but it can be more efficient considering the overall delay and energy consumption because it is placed closer to the local machine<xref target="MEC.IEG006"></xref>. These architectures combine computing, telecommunications, storage, and energy resources flexibly, requiring service requests to be handled in consideration of various performance trade-offs.</t>

    <t>The existing distributed processing technique can divide the case according to the subject performing the service request as follows.</t>


  	<t>(1) All tasks are performed on the local machine.</t>

     <figure anchor="All_tasks_on_local-fig"
                    title="All tasks on local machine">
                    <artwork align="center">

      Local Machine
  +-------------------+
  | Perform all tasks |
  | on local machine  |
  |                   |
  |    +---------+    |
  |    |         |    |
  |    |         |    |
  |    |         |    |
  |    |         |    |
  |    +---------+    |
  |       Local       |
  +-------------------+

                    </artwork>
                    <postamble></postamble>
            </figure>       

		<t>(2) Some of the tasks are performed on the local machine and some are performed on the MEC server.</t>				

     <figure anchor="Some_tasks_on_local_MEC-fig"
                    title="Some tasks on local machine and MEC server">
                    <artwork align="center">

      Local Machine              MEC Server
  +-------------------+    +-------------------+
  |   Perform tasks   |    |   Perform tasks   |
  | on local machine  |    |   on MEC server   |
  |                   |    |                   |
  |    +---------+    |    |  +-------------+  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    +---------+    |    |  +-------------+  |
  |       Local       |    |        MEC        |
  +-------------------+    +-------------------+

                    </artwork>
                    <postamble></postamble>
            </figure>       

		<t>(3) Some of the tasks are performed on the local machine and some are performed on the cloud server.</t>

     <figure anchor="Some_tasks_on_local_cloud-fig"
                    title="Some tasks on local machine and cloud server">
                    <artwork align="center">

      Local Machine            Cloud Server
  +-------------------+    +-------------------+
  |   Perform tasks   |    |   Perform tasks   |
  | on local machine  |    |  on cloud server  |
  |                   |    |                   |
  |    +---------+    |    |  +-------------+  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    +---------+    |    |  +-------------+  |
  |       Local       |    |       Cloud       |
  +-------------------+    +-------------------+

                    </artwork>
                    <postamble></postamble>
            </figure>       

		<t>(4) Some of the tasks are performed on the local machine, some on the MEC server, and some on the cloud server.</t>
				
     <figure anchor="Some_tasks_on_local_MEC_cloud-fig"
                    title="Some tasks on local machine, MEC server, and cloud server">
                    <artwork align="center">

      Local Machine              MEC Server             Cloud Server
  +-------------------+    +-------------------+    +-------------------+
  |   Perform tasks   |    |   Perform tasks   |    |   Perform tasks   |
  | on local machine  |    |   on MEC server   |    |  on cloud server  |
  |                   |    |                   |    |                   |
  |    +---------+    |    |  +-------------+  |    |  +-------------+  |
  |    |         |    |    |  |             |  |    |  |             |  |
  |    |         |    |    |  |             |  |    |  |             |  |
  |    |         |    |    |  |             |  |    |  |             |  |
  |    |         |    |    |  |             |  |    |  |             |  |
  |    +---------+    |    |  +-------------+  |    |  +-------------+  |
  |       Local       |    |        MEC        |    |       Cloud       |
  +-------------------+    +-------------------+    +-------------------+

                    </artwork>
                    <postamble></postamble>
            </figure>       
            				
		<t>(5) Some of the tasks are performed on the MEC server and some are performed on the cloud server.</t>
				
     <figure anchor="Some_tasks_on_MEC_cloud-fig"
                    title="Some tasks on MEC server and cloud server">
                    <artwork align="center">

        MEC Server              Cloud Server
  +-------------------+    +-------------------+
  |   Perform tasks   |    |   Perform tasks   |
  |   on MEC server   |    |  on cloud server  |
  |                   |    |                   |
  |    +---------+    |    |  +-------------+  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    |         |    |    |  |             |  |
  |    +---------+    |    |  +-------------+  |
  |        MEC        |    |       Cloud       |
  +-------------------+    +-------------------+

                    </artwork>
                    <postamble></postamble>
            </figure>       
            				
		<t>(6) All tasks are performed on the MEC server.</t>

     <figure anchor="All_tasks_on_MEC-fig"
                    title="All tasks on MEC server">
                    <artwork align="center">
                    
        MEC Server
  +-------------------+
  | Perform all tasks |
  |   on MEC server   |
  |                   |
  |    +---------+    |
  |    |         |    |
  |    |         |    |
  |    |         |    |
  |    |         |    |
  |    +---------+    |
  |        MEC        |
  +-------------------+

                    </artwork>
                    <postamble></postamble>
            </figure>       
			
		<t>(7) All tasks are performed on the cloud server.</t>

     <figure anchor="All_tasks_on_cloud-fig"
                    title="All tasks on cloud server">
                    <artwork align="center">

      Cloud Server
  +-------------------+
  | Perform all tasks |
  |  on cloud server  |
  |                   |
  |    +---------+    |
  |    |         |    |
  |    |         |    |
  |    |         |    |
  |    |         |    |
  |    +---------+    |
  |       Cloud       |
  +-------------------+

                    </artwork>
                    <postamble></postamble>
            </figure>       		
    
 	  </section>

<!-- Section 2.2 -->
   	<section title="Considerations for Resource Allocation in Task Distributed Processing">
    
    <t>In addition, it is necessary to consider various environments depending on the delay time and the importance of energy consumption to determine which source is appropriate to handle requests for resource use. The importance of delay time and energy consumption depends on the service requirements for resource use. There is a need to adjust the traffic flow according to service requirements.</t>
    
 	  </section>    
    			
	</section>


<!-- Section 3 -->
    <section title="Requirements of Conventional Task Distributed Processing">

		<t>The requirements of task distributed processing refer to the key elements that must be considered and met to effectively distribute computing tasks across multiple nodes in a network. These requirements include:</t>

		<t>
			<list style="symbols">
				<t>Scalability: The ability to add or remove nodes from the network and distribute tasks efficiently and effectively, without compromising performance or functionality.</t>

				<t>Fault tolerance: The ability to handle node failures and network outages without disrupting overall system performance or task completion.</t>				

				<t>Load balancing: The ability to distribute tasks evenly across all nodes, ensuring that no single node becomes overwhelmed or underutilized.</t>				

				<t>Task coordination: The ability to manage task dependencies and ensure that tasks are completed in the correct order and on time.</t>				
				
				<t>Resource management: The ability to manage system resources such as memory, storage, and processing power effectively, to optimize task completion and minimize delays or errors.</t>				
				
				<t>Security: The ability to ensure the integrity and confidentiality of data and tasks, and protect against unauthorized access or tampering.</t>				
		
			</list>
		</t>

		<t>Meeting these requirements is essential to the successful implementation and operation of task distributed processing systems. The effective distribution of tasks across multiple nodes in a network can improve overall system performance and efficiency, while also increasing fault tolerance and scalability.</t>

	</section>


<!-- Section 4 -->
   <section title="Automating Distributed Processing with Digital Twin and AI">
   
   <t>Automating distributed processing utilizing digital twin technology involves digitally modeling physical objects and processes from the real world, enabling real-time tracking and manipulation. This capability revolutionizes how we understand and manage complex networks.</t>
   
   <t>When combined with AI technology, these digital twins form a robust automated distributed processing system. For instance, digital twins can project all nodes and devices within a network digitally. The AI model can utilize various types of information, such as:</t>
   
	 <t>
  	<list style="symbols">
  	
			<t>Network data: Network-related data such as network traffic, packet loss, latency, bandwidth usage, etc., can be valuable for distributed processing automation. This data helps in understanding the current state and trends of the network, optimizing task distribution, and processing.</t>

			<t>Task and task characteristic data: Data that describes the characteristics and requirements of the tasks processed in the distributed processing system is also important. This can include the size, complexity, priority, dependencies, and other attributes of the tasks. Such data allows the AI technology to distribute tasks appropriately and allocate them to the optimal nodes.</t>				

			<t>Performance and resource data: Data related to the performance and resource usage of the distributed processing system is crucial. For example, data representing the processing capabilities of nodes, memory usage, bandwidth, etc., can be utilized to efficiently distribute tasks and optimize task processing.</t>				

			<t>Network configuration and device data: External environmental factors should also be considered. Data such as network topology, connectivity between nodes, energy consumption, temperature, etc., can be useful for optimizing task distribution and processing.</t>				
				
			</list>
		</t>
		   
<t>AI algorithms, based on this digital twin data, can automatically optimize network operations. For example, if overload is detected on a specific node, AI can redistribute tasks to other nodes, minimizing congestion. The real-time updates from digital twins enable continuous, optimal task distribution, allowing the network to adapt swiftly to changes.</t>

<t>By integrating digital twins and AI, the automated distributed processing system maximizes network performance while minimizing bottlenecks. This technology reduces the burden on network administrators, eliminating the need for manual adjustments and enhancing network flexibility and responsiveness.</t>
 
          
   </section>   


<!-- Section 5 - IANA Consideration -->
	<section anchor="IANA" title="IANA Considerations">
		<t>There are no IANA considerations related to this document.</t>
	</section>

<!-- Section 6 - Security Considerations -->
    <section title="Security Considerations">
    	<t>When providing AI services, it is essential to consider security measures to protect sensitive data such as network configurations, user information, and traffic patterns. Robust privacy measures must be in place to prevent unauthorized access and data breaches.</t>

			<t>Implementing effective access control mechanisms is essential to ensure that only authorized personnel or systems can access and modify the network management infrastructure. This involves managing user privileges, using authentication mechanisms, and enforcing strong password policies.</t>

			<t>Maintaining the security and integrity of the training data used for AI models is vital. It is important to ensure that the training data is unbiased, representative, and free from malicious content or data poisoning. This is crucial for the accuracy and reliability of the AI models.</t>

    </section>

<!-- Section 7 - Acknowledgements -->
		<section anchor="Acknowledgements" title="Acknowledgements">
      <t>TBA</t>
      
    </section>
    
</middle>

<!--  *****BACK MATTER ***** -->
<back>
    <!-- References split into informative and normative -->
    
    <!-- There are 2 ways to insert reference entries from the citation libraries:
     1. define an ENTITY at the top, and use "ampersand character"RFC2629; here (as shown)
     2. simply use a PI "less than character"?rfc include="reference.RFC.2119.xml"?> here
     (for I-Ds: include="reference.I-D.narten-iana-considerations-rfc2434bis.xml")
     
     Both are cited textually in the same manner: by using xref elements.
     If you use the PI option, xml2rfc will, by default, try to find included files in the same
     directory as the including file. You can also define the XML_LIBRARY environment variable
     with a value containing a set of directories to search.  These can be either in the local
     filing system or remote ones accessed by http (http://domain/dir/... ).-->

<!--
    <references title="Normative References">

      &rfc2119;  
      
    </references>
-->
        
 <references title="Informative References">

			&id.draft-irtf-nmrg-ai-challenges;		

  <reference anchor="MEC.IEG006">
         <front>
            <title>Mobile Edge Computing; Market Acceleration; MEC Metrics Best Practice and Guidelines
            </title>
            <author>
               <organization>ETSI</organization>
            </author>
            <date month="January" year="2017"/>
         </front>
            
        <seriesInfo name="Group Specification"
                    value="ETSI GS MEC-IEG 006 V1.1.1 (2017-01)"/>
      </reference>
			

</references>


</back>
</rfc>
