<?xml version="1.0" encoding="US-ASCII"?>
<!-- This template is for creating an Internet Draft using xml2rfc,
    which is available here: http://xml.resource.org. -->

<!DOCTYPE rfc SYSTEM "rfc2629.dtd" [
        <!ENTITY RFC2119 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml">
        <!ENTITY RFC2629 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2629.xml">
        <!ENTITY RFC3032 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3032.xml">
        <!ENTITY RFC3277 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3277.xml">
        <!ENTITY RFC3719 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3719.xml">
        <!ENTITY RFC4271 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4271.xml">
        <!ENTITY RFC5120 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5120.xml">
        <!ENTITY RFC5301 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5301.xml">
        <!ENTITY RFC5303 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5303.xml">
        <!ENTITY RFC5305 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5305.xml">
        <!ENTITY RFC5308 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5308.xml">
        <!ENTITY RFC5309 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5309.xml">
        <!ENTITY RFC5311 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5311.xml">
        <!ENTITY RFC5316 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5316.xml">
        <!ENTITY RFC5440 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5440.xml">
        <!ENTITY RFC5449 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5449.xml">
        <!ENTITY RFC5614 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5614.xml">
        <!ENTITY RFC5837 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5837.xml">
        <!ENTITY RFC5820 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5820.xml">
        <!ENTITY RFC6232 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.6232.xml">
        <!ENTITY RFC7356 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.7356.xml">
        <!ENTITY RFC7921 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.7921.xml">
        <!ENTITY RFC8174 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.8174.xml">
        <!ENTITY RFC8126 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.8126.xml">
        <!ENTITY RFC8296 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.8296.xml">
        ]>

<?xml-stylesheet type='text/xsl' href='rfc2629.xslt' ?>
<?rfc strict="yes" ?>
<?rfc toc="yes"?>
<?rfc tocdepth="4"?>
<?rfc symrefs="yes"?>
<?rfc sortrefs="yes" ?>
<?rfc compact="yes" ?>
<?rfc subcompact="no" ?>
<rfc category="exp" docName="draft-prz-lsr-hierarchical-snps-00" ipr="trust200902">

    <!-- ***** FRONT MATTER ***** -->

    <front>

        <title>ISIS Hierarchical SNPs</title>

        <author initials='T.' surname='Przygienda' fullname='Tony Przygienda'>
            <organization>Juniper Networks</organization>
            <address>
                <email>prz@juniper.net</email>
            </address>
        </author>

        <author initials='T.' surname='Li' fullname='Tony Li'>
            <organization>Juniper Networks</organization>
            <address>
                <email>tli@juniper.net</email>
            </address>
        </author>


        <date/>

        <abstract>
            <t>
                This document introduces an optional, new type of SNP called the Hierarchical SNP (HSNP), which
                compresses the traditional CSNP exchange into a variant of a Merkle tree, thereby
                making it possible to support very large databases and numbers of adjacencies.
                In case of inconsistencies, such an approach should lead to much faster re-synchronization,
                since only a subset of packets, compared to a full scale CSNP exchange,
                is necessary to correct the entropy present.
            </t>


        </abstract>

    </front>

    <middle>

        <section title="Introduction">

            <t> This document introduces an optional, new type of SNP called the Hierarchical SNP (HSNP), which
                compresses the traditional CSNP exchange into a variant of a Merkle tree <xref target="MERKLE"/>
                and skip lists <xref target="SKIP"/>, thereby
                making it possible to support very large databases and numbers of adjacencies.
                In case of inconsistencies, such an approach should lead to much faster re-synchronization,
                since only a subset of packets, compared to a full scale CSNP exchange,
                is necessary to correct the entropy present.
            </t>

            <t>
                Although the scheme can be applied recursively to the point where a single merkle hash represents
                the whole database, for practical purposes a two level tree allows compression for LSDB sizes
                of the 1E5 order and hence the document limits itself in examples to such magnitudes. Further
                considerations
                mentioned in
                <xref target="further"/>
                seem to make this limit practically prudent.
            </t>

            <t>
                We call the CSNP entries of LSPs the zero level Merkle hashes (where we basically use the LSP ID,
                sequence number and checksum as the "hash"), the hash summarizing a set of those the first level
                Merkle hash, and so on recursively.
            </t>

        </section>


        <section title="Suggested Dynamic Leaf Partitioning">

            <t>
                Practically speaking, the most interesting problem is the correct subdivision of the database into
                a first level collection of leaf nodes (CSNP entries) that on one hand provides good compression,
                while on the other hand the subdivision changes as little as possible. Otherwise the neighbors receiving the
                information
                may have to recompute the hashes rather than relying on a cache representing their own merkle tree.
                The subdivision should also produce enough "bins" no matter the distribution of the fragment IDs
                in the network. This is important to prevent such things as packing of all the fragments into a single checksum when
                e.g. a hash function degenerates. Ideally, a hash mismatch should produce not more than a single packet or two
                with lower
                level checksums or CSNPs to optimize re-convergence while minimizing the amount of packets exchanged.
            </t>

            <t>
                To start with, in IS-IS networks
                we can fit into the prevailing 1500 bytes somewhat less than 100
                zero level entries for the foreseeable future. This is the consequence of
                CSNP entries consuming (LSP ID + Fragment) + Seq# + CSUM + Lifetime
                length which
                amounts to 8 + 2 + 2 + 2 = 14 bytes each.
            </t>

            <t> Subsequently, first and higher level hashes will occupy (as shown in <xref target="first-order"/>) the length
                of 7 + 7 + 4 = 18 bytes
                per hash
                and hence around 80 of those hashes fit into a packet.
            </t>

            <t>Those considerations lead to the suggested partitioning and packing scheme below. </t>

            <t>
                To start with, as stated, it is desirable to produce one packet on a miss on the merkle hash of a first level
                leaf and hence such leaves will initially pack 80
                LSP fragments, with exceptions following later. We do not try to maximize the "initial packing". The LSDB may grow and to
                maximize the chances of the same "leaf packing" on both sides
                of an adjacency, even during flooding transitions, some
                "slack"
                is advisable.
            </t>

            <t>
                At higher levels, to begin with 60 hashes should be summarized by a higher level hash for the same reason.
            </t>

            <t>
                The packing will always put all fragments of a system into same leaf (which of course can exceed
                the advisable 80 fragments sometimes) and a first order leaf will be considered "full" if addition
                of next System ID fragments would exceed this size (except obviously, when the leaf is empty).
            </t>


            <section title="Repacking">
                <t>
                    During flooding transitions the databases on different nodes may be obviously different
                    for some period of time. As stated, even in such situations
                    it is beneficial for efficient implementations using caching to agree on both sides
                    on the ranges of the Merkle hashes advertised. Or alternately, have a fast way to
                    reconstruct the internal merkle hash for a range. E.g., in the suggested packing scheme,
                    since the nodes will always
                    agree on the "system-ID" boundary, a merkle hash per system ID can be  easily kept
                    and if the received ranges do not agree with cached
                    ranges the necessary merkle hash recomputed very quickly. As example,
                    in first level the worst case of 2 leaves
                    containing ~80 nodes each and "intersecting" the cached internal ranges an implementation
                    will have to merkle hash about 160 hashes (one per System ID) to validate the received value.
                </t>

                <t>Obviously, it is still highly preferred for the ranges of advertised merkle hashes
                to agree on their system ID ranges precisely and this is of utmost importance at the top level.
                Under stable conditions those are the merkle hashes
                reducing the CSNP exchange to a minimal amount of packets and processing effort. </t>

                <t>
                    To settle on the same ranges in HSNPs an implementation of the suggested packing should let a
                    leaf that
                    drops under 50% occupancy "start robbing" system IDs from "left" of the next leaf until the current
                    leaf meets the "full condition". This is of course a recursive action that may ultimately generate
                    less leaves, remove some and in a recursive fashion lead to the same "greedy robbery from the left"
                    in the next level up. The "left" is colloquial here for starting with lowest System IDs under
                    normal sorting criteria.
                    On the other end of the spectrum a leaf that holds more than 150% of usual capacity (i.e. 80 * 1.5 LSPs or 60 * 1.5 Hashes)
                    should be preferably split into two leaves unless it holds a single System ID with more than 80 fragments.
                    Splitting the leaves may cause a repacking at a higher
                    level again in a recursive fashion.

                </t>

                <t>
                    The rebalancing of ranges to agree across all nodes and hence reduce hashing load
                    is a trade-off in terms of possibly large recomputation vs. suffering a penalty of
                    recomputing some hashes on disagreeing ranges on every exchange. Other solutions are
                    of course possible such as internal caches that keep the recomputed hashes for the
                    neighbor's ranges.
                </t>

                <t>
                    Precise splitting/merging algorithms agreed upon increase the likelihood of nodes ending up on
                    precisely the same ranges. A possibly
                    simpler idea to discuss is to simply "repack" the whole thing on some balance violations or
                    periodically. Another idea is to simply use ISIS fragment sliding but this may in the worst
                    case lead to the first level checksumming a single fragment over time.
                </t>
                <t>
                    Overall, different partitioning and packing approaches are possible but if system ID
                    as natural partition is not used, this will likely change the packet
                    format since the partition boundaries will necessarily reflect which of the fragments are
                    covered by the hashes. Although, given that ordering of fragments has to be preserved it is hard to
                    imagine anything else but start and end consisting of fragment IDs.
                </t>

            </section>



        </section>

        <section title="Fast, incremental, self-inverse hashing function">
            <t>
                Since we need to generate a massive amount of hashes over sets of ever-changing fragments, it is very
                desirable to use a specialized hash function that provides means for very fast incremental adjustment
                of the hash result on fragments arriving, aging out or changing their checksums.
                Also, splitting or merging of leaves should allow generating first and higher order hashes very quickly.
            </t>

            <t>
            Deeper considerations on such hashes can be found in
            <xref target="HASHES"/>
            but our design space
            is simplified due to irrelevance of security involved.
            </t>

            <t>
                We use a large prime number constant to prime all hashes.
            </t>

            <t>
                The hash function is a very fast XOR operation
                which incorporates the system IDs, checksums and numbering of fragments. It
                is 64 bits long for simplicity. Symbol &lt;&lt; is used for rotation (not shift) to the left.

            </t>

            <t>
                A hash of a first order leaf is nothing but (where all used values are converted to 64
                bit values first with any remaining high order bits zero'ed out)
                the usual XOR(hash, (system-id-of-fragment + 1) &lt;&lt; fragment#) followed by
                XOR(hash, (fragment# + 1) &lt;&lt; 32 | (fragment-seq# + 1) &lt;&lt;
                16 | (fragment-csum + 1)) for all contained fragments.
                Obviously, such an operation can be easily reversed to yield the previous
                hash with the fragment removed, which allows for very fast removal and addition of fragments and updates
                of the relevant fields.
            </t>

            <t>
                <xref target="algo"/> includes reference code of the introduced hashing.
            </t>
            <t>Example of adding multiple fragments and removing those (in different order) which yields ultimately
            the same initial hash is found below:</t>

            <figure anchor="simple-hash">
            <artwork align="left" name="" type="" alt=""><![CDATA[
Initial Hash: CDDC72CF
Adding Fragment 0100.0000.0000.00 #: 0 CSUM: 1 SEQNR: 0
Hash after Addition 10001CDDD72CC
Adding Fragment 0100.0000.0000.00 #: 1 CSUM: 2 SEQNR: 1
Hash after Addition 1010003CDDF72CE
Adding Fragment 0200.0000.0000.00 #: 1 CSUM: 3 SEQNR: 2
Hash after Addition 3010001CDDC72CB
Removing Fragment 0100.0000.0000.00 #: 0 CSUM: 1 SEQNR: 0
Hash after Removal 3000000CDDD72C8
Removing Fragment 0100.0000.0000.00 #: 1 CSUM: 2 SEQNR: 1
Hash after Removal 2000002CDDF72CA
Removing Fragment 0200.0000.0000.00 #: 1 CSUM: 3 SEQNR: 2
Hash after Removal CDDC72CF
]]></artwork>
            </figure>

            <t>
                A hash of a second order leaf is nothing but the XOR of the hashes of all the contained first order leaves.
                Again, this allows to update the hash when adding, removing a leaf or
                changing its checksum
                in a very fast and simple manner. The packing of second order leaves is determined by
                how many first order hashes can normally be fitted into a PDU and based on <xref target="format"/>
                we can set a second order leaf size to 60. This will cause a mismatch in the hash of a 2nd order leaf
                to advertise roughly a PDU full of first order leaves.
            </t>

            <t>
                Ultimately, and fairly obviously, a third order hash uses the second order hash logic to keep its hash.
                This all means that every time a first order leaf changes the
                contained system IDs for
                some reason the merkle hashes will have to be readjusted recursively in the according 2nd and third
                order leaves. This is in itself nothing
                particular since
                <spanx style="emph">any</spanx>
                change on a first order leaf hash forces a change on the second order and consequently third order leaf hash.
                This is how Merkle
                trees work after all.
            </t>

            <t>A first example will serve well here. We limit ourselves in the examples to consideration of a
                LSDB with 512 nodes with system identifiers of 1000.0000.00 &lt;2 digits node-id&gt; each holding
                32 fragments
                numbered 0 to 31.
                We leave the uneven node identifiers out to have some "holes" in the numbering to hit some corner
                cases in further examples.
                We disregard the pseudo node byte as simply another byte of system identifier since it does not
                contribute
                further details to the scheme and use value 0 in further text.

            </t>

            <t>
                In a stable state we can expect the following 128 first order leaves (each holding 2 systems worth
                of fragments) generating three packets.
                And as logical consequence two single second order leaves. First of the three first order packets
                will look roughly like this
            </t>

            <figure anchor="first-order">
            <artwork align="center" name="" type="" alt=""><![CDATA[
        ...

+--------------------------------------------+
|  Start System ID: 0000.0000.0000.00        |
+--------------------------------------------+
|  End System ID:   0000.0000.00A0.00        | // 80 hashes for 160 systems
+--------------------------------------------+
|  HSNP Level: 0                             |
+--------------------------------------------+
|  Start System ID: 1000.0000.0000.00        |
+--------------------------------------------+
|  End System ID:   1000.0000.0002.00        | // 64 fragments over 2 systems
+--------------------------------------------+
|              Merkle Hash                   |
+--------------------------------------------+
..
|  Start System ID: 1000.0000.008E.00        |
+--------------------------------------------+
|  End System ID:   1000.0000.00A0.00        |
+--------------------------------------------+
|              Merkle Hash                   |
+--------------------------------------------+
]]></artwork>
            </figure>

            <t>The 2nd order packet will look like this</t>

            <figure anchor="second-order">
            <artwork align="center" name="" type="" alt=""><![CDATA[
        ...

+--------------------------------------------+
|  Start System ID: 0000.0000.0000.00        |
+--------------------------------------------+
|  End System ID:   FFFF.FFFF.FFFF.FF        |
+--------------------------------------------+
|  HSNP Level: 1                             |
+--------------------------------------------+
|  Start System ID: 1000.0000.0000.00        |
+--------------------------------------------+
|  End System ID:   1000.0000.00A0.00        |
+--------------------------------------------+
|              Merkle Hash                   |
+--------------------------------------------+
|  Start System ID: 1000.0000.00A2.00        |
+--------------------------------------------+
|  End System ID:   1000.0000.0200.00        |
+--------------------------------------------+
|              Merkle Hash                   |
+--------------------------------------------+
]]></artwork>
            </figure>

        </section>

        <section title="HSNP PDU Format" anchor="format">
            <t>
                HSNP PDU Format follows closely CSNP format where instead of CSNP entries the according merkle
                hashes are propagated.
            </t>

            <artwork align="center" name="" type="" alt=""><![CDATA[
        ...

+--------------------------------------------+
|              PDU Length                    |
+--------------------------------------------+
|              Source ID                     |
+--------------------------------------------+
|              Start System ID               |
+--------------------------------------------+
|              End System ID                 |
+--------------------------------------------+
|              HSNP Level                    |
+--------------------------------------------+
|           Variable Length Fields           |
+--------------------------------------------+

]]></artwork>

            <t> Start and End System IDs are of the usual ID Length + 1 byte length and indicate, just like CSNPs do,
                the range that the HSNP covers.
            </t>
            <t>
                HSNP level consists of 1 byte of level with 0 indicating "first level hashes".
            </t>
            <t>
                The variable length fields are a sorted sequence of covered ranges in the following format
            </t>

            <artwork align="center" name="" type="" alt=""><![CDATA[

+--------------------------------------------+
|              Start System ID               |
+--------------------------------------------+
|              End System ID                 |
+--------------------------------------------+
|              Merkle Hash                   |
+--------------------------------------------+
]]></artwork>

            <t>End System ID LSPs are included in the hash.</t>
            <t>
                Merkle hash consists of 4 bytes of the 64-bit computed hash with upper and lower 4 bytes XOR'ed together.
            </t>

            <t>
                This makes an entry in typical deployment scenarios 7 + 7 + 4 = 18 bytes long.
            </t>


        </section>

        <section title="Procedures">
            <section title="Maximum supported level negotiation in IIH">
                <t>
                IIH of nodes supporting this extension MUST include in IIH a new TLV that will indicate
                    support for reception of HSNPs. Additionally, the TLV will carry the maximum level
                    of HSNPs that the node will advertise. Each node MUST pick the minimum of
                    advertised levels on the adjacency and use that for the HSNPs it will advertise.
                    All nodes on the adjacency MUST advertise
                    the TLV on their IIHs, otherwise HSNPs are not used.
                </t>
            </section>

            <section title="Advertising HSNPs">
                <t>
                Advertising normal CSNPs is replaced with advertisement of HSNPs at highest negotiated level.
                    Under normal stable network condition this will be enough to maintain database integrity
                    across all nodes with a minimum of transmission and processing.
                </t>

                <t>
                    In case a node receives HSNPs where the merkle hash ranges are not the same, the node MUST
                    either compute and verify the hashes over the ranges indicated or
                    disaggregate the overlapped ranges into lower level HSNPs. The disaggregation is
                    less preferred since, in case of range mismatches over all levels with both sides
                    using this strategy, this can lead to a 'ping-pong' ending with CSNPs ultimately.
                </t>

                <t>
                A node receiving an HSNP where the hash received does not match on recomputation or
                    comparison the result on its own LSDB
                    SHOULD send immediately HSNPs of one level below with the Merkle hashes for the
                ranges where the hash mismatch was detected. Alternately, a node MAY choose to
                    immediately send according CSNPs, PSNPs or flood the LSPs that have been detected
                    as not matching the merkle hashes. Sending CSNPs may be preferable if the mismatch
                covers relatively few LSPs. </t>

                <t>In case a node detects that it holds Merkle hashes for LSPs that are not covered by the
                    received HSNP (start and end range and "holes" between ranges can be used to detect that
                    condition), it MUST trigger the same behavior as triggered by CSNP with this condition,
                    i.e. flood the missing LSPs.
                </t>


            </section>

        </section>

        <section title="Further Considerations" toc="default" anchor="further">

            <section title="Impact of Packet Losses">
                <t>
                    Many levels of aggregation will be more susceptible to packet losses since each loss
                    covers a much larger part of the LSDB. Additionally, during disaggregation where
                    multiple HSNP levels must be triggered, the loss will force a delay until the
                    higher level HSNP is regenerated.
                </t>
            </section>

            <section title="Decompression and Caching/Comparison Optimizations">
                <t>
                    As mentioned above a node may apply many strategies to speed up decompression.
                    LSPs missing in HSNPs as not covered by ranges are clearly "missing in action" and can
                    be reflooded, small ranges where merkle mismatched can generate CSNPs, PSNPs or lead to flooding.
                </t>
                <t>
                    Caching of hashes can be applied at many levels since the merkle hashes suggested here are
                    easily computed, even if certain elements must be removed from them, so e.g. on receiving a
                    range &lt;A -- B&gt; while the node already holds &lt; A -- B &amp; next-after-B&gt; can be simply
                    adjusted by removing 'next-after-B' merkle hash defined in this document from the cached result.
                </t>
            </section>

        </section> <!-- end of further considerations -->

        <section title="Security Considerations" toc="default">

            <t>TBD
            </t>

        </section> <!-- end of security considerations -->

        <section anchor="IGP_IANA" title="IANA Section">
            <t>TBD
            </t>
        </section>




        <!-- 2 -->
        <section title="Contributors" toc="default">

            <t>TBD</t>

        </section> <!-- end of contributors -->

        <!-- 2 -->
        <section title="Acknowledgement" toc="default">

            <t>The discussions about "compressing CSNPs" go very long way back, allegedly to the times Radia Perlman was
                stalking the halls with insomniac Dave Katz. Recent efforts to push the scale of the protocol to new
                heights
                made the effort worth the effort (pun intended) to codify it into a standardized, practical
                engineering solution.
            </t>

        </section> <!-- end of acknowledgement -->

    </middle>

    <back>


        <references title="Normative References">

            <!-- &RFC3032;
            &RFC8296;
            -->

            <reference anchor="MERKLE">
                <front>
                    <title>A Digital Signature Based on a Conventional Encryption Function</title>
                    <seriesInfo name="Advances in Cryptology - CRYPTO '87" value=""/>
                    <author initials="R.C." surname="Merkle">
                    </author>
                    <date year="1988"/>
                </front>
            </reference>


            <reference anchor="HASHES">
                <front>
                    <title>Security considerations for incremental hash functions based on pair block chaining</title>
                    <seriesInfo name="Computers and Security 25" value=""/>
                    <author initials="R.C.-W." surname="Phan">
                    </author>
                    <date year="2006"/>
                </front>
            </reference>

            <reference anchor="SKIP">
                <front>
                    <title>Skip lists: A probabilistic alternative to balanced trees</title>
                    <seriesInfo name="Communications of the ACM" value=""/>
                    <author initials="W." surname="Pugh">
                    </author>
                    <date year="1990"/>
                </front>
            </reference>

        </references> <!-- end of normative references -->

        <references title="Informative References">


        </references> <!-- end of informative references -->

        <section title="Reference Implementation of Fragment Hashing" anchor="algo">

            <artwork align="center" name="" type="" alt=""><![CDATA[
<CODE BEGINS>
// 64-bit incremental hash over a set of LSP fragments; XOR based and hence
// self-inverse, per the hashing section of this document.
#[derive(Eq, PartialEq, Hash)]
struct IncrementalHash(u64);

// Seed used to prime every hash (the document calls for a large prime
// constant; this value matches the "Initial Hash" in the worked example).
const SEED: u64 = 0xCDDC72CF;

impl Default for IncrementalHash {
    // A fresh hash starts at the seed value rather than zero.
    fn default() -> Self {
        IncrementalHash(SEED)
    }
}

impl IncrementalHash {
    // XOR each of the given 64-bit values into the running hash. Since XOR is
    // its own inverse, hashing the same values in again removes them.
    fn hash_in(&mut self, add: &[u64]) {
        for f in add {
            self.0 ^= f;
        }
    }

    // Fold one LSP fragment (identified by system ID, fragment number,
    // sequence number and checksum) into the hash, following the first order
    // leaf hash definition given in the prose of this document.
    fn hash_in_fragment(&mut self, sid: &SystemID, fragment_number: FragmentNr,
                        fragment:  &Arc<FragmentContent>) {
        // Right-align up to 8 bytes of the system ID in a 64-bit buffer,
        // zero-filling any leading bytes.
        let len = 8.min(sid.id.len());
        let mut sidc: [u8; 8] = [0; 8];
        sidc[(8-len) ..].copy_from_slice(&sid.id);
        // Byte-wise left rotation by the fragment number. NOTE(review): the
        // prose describes a left rotation of the 64-bit value by fragment#
        // applied after the "+ 1" — confirm the intended order/granularity.
        sidc.rotate_left(fragment_number as _);

        self.hash_in(&[u64::from_be_bytes(sidc) + 1]);

        // The "+ 1" on each field mirrors the "(x + 1)" terms in the prose
        // definition of the first order leaf hash.
        let frnr = fragment_number as u64 + 1;
        let frsn = fragment.seqnr as u64 + 1;
        let frcs = fragment.csum as u64 + 1;

        // Pack fragment number, sequence number and checksum into one word.
        let hin = frnr << 32 | frsn << 16 | frcs;
        self.hash_in(&[hin]);
    }
}
<CODE ENDS>
]]></artwork>
        </section>

    </back>

</rfc>
