dynasmrt/
components.rs

//! This module provides several reusable components for implementing assemblers
2
3use std::io;
4use std::collections::hash_map::Entry;
5use std::collections::BTreeMap;
6use std::sync::{Arc, RwLock, RwLockWriteGuard};
7use std::mem;
8
9use fnv::FnvHashMap;
10
11use crate::{DynamicLabel, AssemblyOffset, DynasmError, LabelKind, DynasmLabelApi};
12use crate::mmap::{ExecutableBuffer, MutableBuffer};
13use crate::relocations::{Relocation, RelocationKind, RelocationSize, ImpossibleRelocation};
14use crate::cache_control;
15
/// A static label represents either a local label or a global label reference.
///
/// Global labels are unique names, which can be referenced multiple times, but only defined once
/// (per-[crate::Assembler]).
///
/// Local labels are non-unique names. They can be referenced multiple times, and any reference
/// indicates if they refer to a label after the reference, or a label before the reference.
///
/// A static label records how many local labels with the same name have been emitted beforehand
/// so we can treat them as local labels as well.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct StaticLabel {
    name: &'static str,
    version: usize,
}

impl StaticLabel {
    /// Create a new static label for a global label.
    ///
    /// Global labels are encoded as version 0; local labels start at version 1.
    pub fn global(name: &'static str) -> StaticLabel {
        StaticLabel { name, version: 0 }
    }

    /// Create a new static label for a local label, with the given version id to distinguish it.
    pub fn local(name: &'static str, version: usize) -> StaticLabel {
        StaticLabel { name, version }
    }

    /// Returns if this static label represents a global label.
    pub fn is_global(&self) -> bool {
        !self.is_local()
    }

    /// Returns if this static label represents a local label.
    pub fn is_local(&self) -> bool {
        self.version != 0
    }

    /// Returns the static label targeting the label with the same name, after this one.
    /// If it is a global label, returns a copy of itself.
    pub fn next(mut self) -> StaticLabel {
        // only local labels have successors; globals are unique and stay as-is
        if self.is_local() {
            self.version += 1;
        }
        self
    }

    /// Returns the representation of the first local label used with the given name.
    pub fn first(name: &'static str) -> StaticLabel {
        StaticLabel::local(name, 1)
    }

    /// Returns the name of this static label.
    pub fn get_name(&self) -> &'static str {
        self.name
    }
}
79
/// This struct implements a protection-swapping assembling buffer.
///
/// Data is committed into an mmap-backed [`ExecutableBuffer`]; when more data is
/// committed than fits, the mapping is reallocated and the cached address/size
/// fields below are updated by `commit`.
#[derive(Debug)]
pub struct MemoryManager {
    // buffer where the end result is copied into
    execbuffer: Arc<RwLock<ExecutableBuffer>>,

    // size of the allocated mmap (so we don't have to go through RwLock to get it)
    execbuffer_size: usize,
    // length of the allocated mmap that has been written into
    asmoffset: usize,

    // the address that the current execbuffer starts at (cached so it can be
    // queried without locking; kept in sync by `commit`)
    execbuffer_addr: usize
}
94
impl MemoryManager {
    /// Create a new memory manager, with `initial_mmap_size` data allocated
    pub fn new(initial_mmap_size: usize) -> io::Result<Self> {
        let execbuffer = ExecutableBuffer::new(initial_mmap_size)?;
        // cache the start address so it can be queried without taking the lock
        let execbuffer_addr = execbuffer.as_ptr() as usize;

        Ok(MemoryManager {
            execbuffer: Arc::new(RwLock::new(execbuffer)),
            execbuffer_size: initial_mmap_size,
            asmoffset: 0,
            execbuffer_addr
        })
    }

    /// Returns the amount of bytes already committed to the manager
    pub fn committed(&self) -> usize {
        self.asmoffset
    }

    /// Returns the current start address of the managed executable memory
    pub fn execbuffer_addr(&self) -> usize {
        self.execbuffer_addr
    }

    /// Commits the data from `new` into the managed memory, calling `f` when the buffer is moved to fix anything
    /// that relies on the address of the buffer.
    ///
    /// `f` is only invoked when the backing buffer had to be reallocated (the committed data no
    /// longer fit), as `f(whole_new_buffer, old_start_addr, new_start_addr)`. On return,
    /// `new` has been cleared so the caller can reuse its allocation.
    pub fn commit<F>(&mut self, new: &mut Vec<u8>, f: F) where F: FnOnce(&mut [u8], usize, usize) {
        let old_asmoffset = self.asmoffset;
        let new_asmoffset = self.asmoffset + new.len();

        // nothing new was added, so there is nothing to commit
        if old_asmoffset >= new_asmoffset {
            return;
        }

        // see if we need to request a new buffer
        if new_asmoffset > self.execbuffer_size {
            // at least double the size to amortize the cost of reallocation
            while self.execbuffer_size <= new_asmoffset {
                self.execbuffer_size *= 2;
            }

            // create a larger writable buffer
            let mut new_buffer = MutableBuffer::new(self.execbuffer_size).expect("Could not allocate a larger buffer");
            new_buffer.set_len(new_asmoffset);

            // copy over the data
            new_buffer[.. old_asmoffset].copy_from_slice(&self.execbuffer.read().unwrap());
            new_buffer[old_asmoffset..].copy_from_slice(new);
            let new_buffer_addr = new_buffer.as_ptr() as usize;

            // allow modifications to be made (the caller can fix up anything that
            // depended on the old buffer address)
            f(&mut new_buffer, self.execbuffer_addr, new_buffer_addr);

            // resynchronize the entire buffer, as everything now lives at a new address
            cache_control::synchronize_icache(&new_buffer);

            // swap the buffers, making the new one executable
            self.execbuffer_addr = new_buffer_addr;
            *self.execbuffer.write().unwrap() = new_buffer.make_exec().expect("Could not swap buffer protection modes")

        } else {

            // temporarily change the buffer protection modes and copy in new data
            let mut lock = self.write();
            // swap a placeholder in so the buffer can be taken by value for make_mut;
            // the write lock is held throughout, so no reader can observe the
            // placeholder or the non-executable intermediate state
            let buffer = mem::replace(&mut *lock, ExecutableBuffer::default());
            let mut buffer = buffer.make_mut().expect("Could not swap buffer protection modes");

            // update buffer and length
            buffer.set_len(new_asmoffset);
            buffer[old_asmoffset..].copy_from_slice(new);

            // ensure that no old data remains in the icache of what we just updated
            cache_control::synchronize_icache(&buffer[old_asmoffset .. ]);

            // repack the buffer
            let buffer = buffer.make_exec().expect("Could not swap buffer protection modes");
            *lock = buffer;
        }

        new.clear();
        self.asmoffset = new_asmoffset;
    }

    /// Borrow the internal memory buffer mutably
    pub fn write(&self) -> RwLockWriteGuard<ExecutableBuffer> {
        self.execbuffer.write().unwrap()
    }

    /// Finalizes the currently committed part of the buffer.
    ///
    /// Fails, returning `self` unchanged, if outstanding references created by
    /// [`Self::reader`] still exist, as the buffer cannot then be uniquely owned.
    pub fn finalize(self) -> Result<ExecutableBuffer, Self> {
        match Arc::try_unwrap(self.execbuffer) {
            Ok(execbuffer) => Ok(execbuffer.into_inner().unwrap()),
            Err(arc) => Err(Self {
                execbuffer: arc,
                ..self
            })
        }
    }

    /// Create an atomically refcounted reference to the internal executable buffer
    pub fn reader(&self) -> Arc<RwLock<ExecutableBuffer>> {
        self.execbuffer.clone()
    }
}
198
199
/// A registry of labels. Contains all necessities for keeping track of dynasm labels.
/// This is useful when implementing your own assembler and can also be used to query
/// assemblers for the offsets of labels.
#[derive(Debug, Clone, Default)]
pub struct LabelRegistry {
    // mapping of local + global labels to offsets
    static_labels: FnvHashMap<StaticLabel, AssemblyOffset>,
    // mapping of dynamic label ids to offsets (`None` while allocated but not yet defined)
    dynamic_labels: Vec<Option<AssemblyOffset>>,
    // mapping of local label -> current generation. Generation starts at 1.
    local_versions: FnvHashMap<&'static str, usize>,
}
212
213impl LabelRegistry {
214    /// Create a new, empty label registry
215    pub fn new() -> LabelRegistry {
216        LabelRegistry {
217            static_labels: FnvHashMap::default(),
218            dynamic_labels: Vec::new(),
219            local_versions: FnvHashMap::default(),
220        }
221    }
222
223    /// Create a new, empty label registry with `capacity` space for each different label type.
224    pub fn with_capacity(locals: usize, globals: usize, dynamics: usize) -> LabelRegistry {
225        LabelRegistry {
226            static_labels: FnvHashMap::with_capacity_and_hasher(locals + globals, Default::default()),
227            dynamic_labels: Vec::with_capacity(dynamics),
228            local_versions: FnvHashMap::with_capacity_and_hasher(locals, Default::default()),
229        }
230    }
231
232    /// Clears the internal contents of this label registry, while maintaining the backing allocations.
233    pub fn clear(&mut self) {
234        self.static_labels.clear();
235        self.dynamic_labels.clear();
236        self.local_versions.clear();
237    }
238
239    /// Create a new dynamic label id
240    pub fn new_dynamic_label(&mut self) -> DynamicLabel {
241        let id = self.dynamic_labels.len();
242        self.dynamic_labels.push(None);
243        DynamicLabel(id)
244    }
245
246    /// Define a the dynamic label `id` to be located at `offset`.
247    pub fn define_dynamic(&mut self, id: DynamicLabel, offset: AssemblyOffset) -> Result<(), DynasmError> {
248        match self.dynamic_labels.get_mut(id.0) {
249            Some(Some(_)) => return Err(DynasmError::DuplicateLabel(LabelKind::Dynamic(id))),
250            Some(e)       => *e = Some(offset),
251            None          => return Err(DynasmError::UnknownLabel(LabelKind::Dynamic(id))),
252        }
253        Ok(())
254    }
255
256    /// Define a the global label `name` to be located at `offset`.
257    pub fn define_global(&mut self, name: &'static str, offset: AssemblyOffset) -> Result<(), DynasmError> {
258        match self.static_labels.entry(StaticLabel::global(name)) {
259            Entry::Occupied(_) => Err(DynasmError::DuplicateLabel(LabelKind::Global(name))),
260            Entry::Vacant(v) => {
261                v.insert(offset);
262                Ok(())
263            }
264        }
265    }
266
267    /// Define a the local label `name` to be located at `offset`.
268    pub fn define_local(&mut self, name: &'static str, offset: AssemblyOffset) {
269        let generation = match self.local_versions.entry(name) {
270            Entry::Occupied(mut o) => {
271                *o.get_mut() += 1;
272                *o.get()
273            },
274            Entry::Vacant(v) => {
275                v.insert(1);
276                1
277            }
278        };
279        self.static_labels.insert(StaticLabel::local(name, generation), offset);
280    }
281
282    /// Turns a local label into a static label, by adding some extra information to it
283    /// so we know what local label it is even after another has been defined
284    pub fn place_local_reference(&self, name: &'static str) -> Option<StaticLabel> {
285        self.local_versions.get(name).map(|&version| StaticLabel::local(name, version))
286    }
287
288    /// Returns the offset at which the dynamic label `id` was defined, if one was defined.
289    pub fn resolve_dynamic(&self, id: DynamicLabel) -> Result<AssemblyOffset, DynasmError> {
290        self.dynamic_labels.get(id.0).and_then(|&e| e).ok_or(DynasmError::UnknownLabel(LabelKind::Dynamic(id)))
291    }
292
293    /// Returns the offset at which the global label `label` was defined, if one was defined.
294    pub fn resolve_static(&self, label: &StaticLabel) -> Result<AssemblyOffset, DynasmError> {
295        self.static_labels.get(label).cloned().ok_or_else(|| DynasmError::UnknownLabel(
296            if label.is_global() {
297                LabelKind::Global(label.name)
298            } else {
299                LabelKind::Local(label.name)
300            }
301        ))
302    }
303}
304
305
/// An abstraction of a relocation of type `R`, located at `location`.
#[derive(Clone, Debug)]
pub struct PatchLoc<R: Relocation> {
    /// The AssemblyOffset at which this relocation was emitted
    pub location: AssemblyOffset,
    /// The offset, backwards, from location that the actual field to be modified starts at
    /// (i.e. the field occupies `location - field_offset .. location - field_offset + size`)
    pub field_offset: u8,
    /// The offset, backwards, to be subtracted from location to get the address that the relocation should be calculated relative to.
    pub ref_offset: u8,
    /// The type of relocation to be emitted.
    pub relocation: R,
    /// A constant offset added to the destination address of this relocation when it is calculated.
    pub target_offset: isize,
}
320
impl<R: Relocation> PatchLoc<R> {
    /// create a new `PatchLoc`
    pub fn new(location: AssemblyOffset, target_offset: isize, field_offset: u8, ref_offset: u8, relocation: R) -> PatchLoc<R> {
        PatchLoc {
            location,
            field_offset,
            ref_offset,
            relocation,
            target_offset
        }
    }

    /// Returns a range that covers the entire relocation in its assembling buffer
    /// `buf_offset` is a value that is subtracted from this range when the buffer you want to slice
    /// with this range is only a part of a bigger buffer.
    pub fn range(&self, buf_offset: usize) -> std::ops::Range<usize> {
        // the field to patch starts `field_offset` bytes before `location`
        let field_offset = self.location.0 - buf_offset -  self.field_offset as usize;
        field_offset .. field_offset + self.relocation.size()
    }

    /// Returns the actual value that should be inserted at the relocation site.
    /// `target` is an offset in the assembling buffer; `buf_addr` is the address the
    /// buffer will come to reside at. `target_offset` is always added to the result.
    pub fn value(&self, target: usize, buf_addr: usize) -> isize {
        // wrapping arithmetic on usize followed by a cast to isize yields the correct
        // signed displacement in two's complement, even when the target lies before
        // the reference point
        (match self.relocation.kind() {
            // buffer-relative target encoded relative to the reference point:
            // target - (location - ref_offset)
            RelocationKind::Relative => target.wrapping_sub(self.location.0 - self.ref_offset as usize),
            // target encoded relative to the absolute address of the reference point:
            // target - ((location - ref_offset) + buf_addr)
            RelocationKind::RelToAbs => target.wrapping_sub(self.location.0 - self.ref_offset as usize + buf_addr),
            // buffer-relative target encoded as an absolute address: target + buf_addr
            RelocationKind::AbsToRel => target + buf_addr
        }) as isize + self.target_offset
    }

    /// Patch `buffer` so that this relocation patch will point to `target`.
    /// `buf_addr` is the address that the assembling buffer will come to reside at when it is assembled.
    /// `target` is the offset that this relocation will be targetting.
    pub fn patch(&self, buffer: &mut [u8], buf_addr: usize, target: usize) -> Result<(), ImpossibleRelocation> {
        let value = self.value(target, buf_addr);
        self.relocation.write_value(buffer, value)
    }

    /// Patch `buffer` so that this relocation will still point to the right location due to a change in the address of the containing buffer.
    /// `buffer` is a subsection of a larger buffer, located at offset `buf_offset` in this larger buffer.
    /// `adjustment` is `new_buf_addr - old_buf_addr`.
    pub fn adjust(&self, buffer: &mut [u8], adjustment: isize) -> Result<(), ImpossibleRelocation> {
        let value = match self.relocation.kind() {
            // purely buffer-internal displacements are unaffected by the buffer moving
            RelocationKind::Relative => return Ok(()),
            // encoded value subtracts the buffer address, which grew by `adjustment`
            RelocationKind::RelToAbs => self.relocation.read_value(buffer).wrapping_sub(adjustment),
            // encoded value adds the buffer address, so compensate upwards
            RelocationKind::AbsToRel => self.relocation.read_value(buffer).wrapping_add(adjustment),
        };
        self.relocation.write_value(buffer, value)
    }

    /// Returns if this patch requires adjustment when the address of the buffer it resides in is altered.
    pub fn needs_adjustment(&self) -> bool {
        match self.relocation.kind() {
            RelocationKind::Relative => false,
            RelocationKind::RelToAbs
            | RelocationKind::AbsToRel => true,
        }
    }
}
379
380
/// A registry of relocations and the respective labels they point towards.
#[derive(Debug, Default)]
pub struct RelocRegistry<R: Relocation> {
    // relocations targeting static (global or local) labels, paired with their target
    static_targets: Vec<(PatchLoc<R>, StaticLabel)>,
    // relocations targeting dynamic labels, paired with their target
    dynamic_targets: Vec<(PatchLoc<R>, DynamicLabel)>,
}
387
388impl<R: Relocation> RelocRegistry<R> {
389    /// Create a new, empty relocation registry.
390    pub fn new() -> RelocRegistry<R> {
391        RelocRegistry {
392            static_targets: Vec::new(),
393            dynamic_targets: Vec::new(),
394        }
395    }
396
397    /// Create a new, empty relocation registry with reserved space for the specified amount of static and dynamic references
398    pub fn with_capacity(static_references: usize, dynamic_references: usize) -> RelocRegistry<R> {
399        RelocRegistry {
400            static_targets: Vec::with_capacity(static_references),
401            dynamic_targets: Vec::with_capacity(dynamic_references),
402        }
403    }
404
405    /// Add a new patch targetting a static label (either global or local).
406    pub fn add_static(&mut self, label: StaticLabel, patchloc: PatchLoc<R>) {
407        self.static_targets.push((patchloc, label));
408    }
409
410    /// Add a new patch targetting the dynamic label `id`.
411    pub fn add_dynamic(&mut self, id: DynamicLabel, patchloc: PatchLoc<R>) {
412        self.dynamic_targets.push((patchloc, id))
413    }
414
415    /// Return an iterator through all defined relocations targeting global labels and the labels they target.
416    /// These relocations are removed from the registry.
417    pub fn take_statics<'a>(&'a mut self) -> impl Iterator<Item=(PatchLoc<R>, StaticLabel)> + 'a {
418        self.static_targets.drain(..)
419    }
420
421    /// Return an iterator through all defined relocations targeting dynamic labels and the labels they target.
422    /// These relocations are removed from the registry.
423    pub fn take_dynamics<'a>(&'a mut self) -> impl Iterator<Item=(PatchLoc<R>, DynamicLabel)> + 'a {
424        self.dynamic_targets.drain(..)
425    }
426}
427
428
/// A registry of relocations that have been encoded previously, but need to be adjusted when the address of the buffer they
/// reside in changes.
#[derive(Debug, Default)]
pub struct ManagedRelocs<R: Relocation> {
    // keyed by the buffer offset at which each relocation's field starts, so
    // ranges of relocations can be queried efficiently (see `remove_between`)
    managed: BTreeMap<usize, PatchLoc<R>>
}
435
436impl<R: Relocation> ManagedRelocs<R> {
437    /// Create a new, empty managed relocation registry.
438    pub fn new() -> Self {
439        Self {
440            managed: BTreeMap::new()
441        }
442    }
443
444    /// Add a relocation to this registry.
445    pub fn add(&mut self, patchloc: PatchLoc<R>) {
446        self.managed.insert(patchloc.location.0 - patchloc.field_offset as usize, patchloc);
447    }
448
449    /// Take all items from another registry and add them to this registry
450    pub fn append(&mut self, other: &mut ManagedRelocs<R>) {
451        self.managed.append(&mut other.managed);
452    }
453
454    /// Remove all managed relocations whose byte fields start in the range start .. end.
455    /// This is useful when implementing an `Assembler::alter` API, as any managed relocations
456    /// that were overwritten should be removed from the registry, otherwise the replacement code
457    /// would be corrupted when managed relocations are updated.
458    pub fn remove_between(&mut self, start: usize, end: usize) {
459        if start == end {
460            return;
461        }
462
463        let keys: Vec<_> = self.managed.range(start .. end).map(|(&k, _)| k).collect();
464        for k in keys {
465            self.managed.remove(&k);
466        }
467    }
468
469    /// Iterate through all defined managed relocations.
470    pub fn iter<'a>(&'a self) -> impl Iterator<Item=&'a PatchLoc<R>> + 'a {
471        self.managed.values()
472    } 
473}
474
475
// The kinds of items that can be recorded in a literal pool, in insertion order.
#[derive(Clone, Debug)]
enum LitPoolEntry {
    // plain integer constants, emitted verbatim
    U8(u8),
    U16(u16),
    U32(u32),
    U64(u64),
    // a relocation of the given size targeting a dynamic label
    Dynamic(RelocationSize, DynamicLabel),
    // a relocation of the given size targeting a global label
    Global(RelocationSize, &'static str),
    // a relocation of the given size targeting a forward local label
    Forward(RelocationSize, &'static str),
    // a relocation of the given size targeting a backward local label
    Backward(RelocationSize, &'static str),
    // pad with the first field's byte value until the second field's alignment is reached
    Align(u8, usize),
}
488
/// Literal pool implementation. One can programmatically push items in this literal pool and retrieve offsets to them in the pool.
/// Then later, the pool will be encoded into the instruction stream and items can be retrieved using the address of the literal pool
/// and the previously emitted offsets. Values are always at least aligned to their size.
#[derive(Clone, Debug, Default)]
pub struct LitPool {
    // current size of the pool in bytes, including alignment padding
    offset: usize,
    // the recorded entries, in insertion order
    entries: Vec<LitPoolEntry>,
}
497
498impl LitPool {
499    /// Create a new, empty literal pool
500    pub fn new() -> Self {
501        LitPool {
502            offset: 0,
503            entries: Vec::new(),
504        }
505    }
506
507    // align the pool to the specified size, record the offset, and bump the offset
508    fn bump_offset(&mut self, size: RelocationSize) -> isize {
509        // Correct for alignment
510        self.align(size as usize, 0);
511        let offset = self.offset;
512        self.offset += size as usize;
513        offset as isize
514    }
515
516    /// Add extra alignment for the next value in the literal pool
517    pub fn align(&mut self, size: usize, with: u8) {
518        let misalign = self.offset % size;
519        if misalign == 0 {
520            return;
521        }
522
523        self.entries.push(LitPoolEntry::Align(with, size));
524        self.offset += size - misalign;
525    }
526
527    /// Encode `value` into the literal pool.
528    pub fn push_u8(&mut self, value: u8) -> isize {
529        let offset = self.bump_offset(RelocationSize::Byte);
530        self.entries.push(LitPoolEntry::U8(value));
531        offset
532    }
533
534    /// Encode `value` into the literal pool.
535    pub fn push_u16(&mut self, value: u16) -> isize {
536        let offset = self.bump_offset(RelocationSize::Word);
537        self.entries.push(LitPoolEntry::U16(value));
538        offset
539    }
540
541    /// Encode `value` into the literal pool.
542    pub fn push_u32(&mut self, value: u32) -> isize {
543        let offset = self.bump_offset(RelocationSize::DWord);
544        self.entries.push(LitPoolEntry::U32(value));
545        offset
546    }
547
548    /// Encode `value` into the literal pool.
549    pub fn push_u64(&mut self, value: u64) -> isize {
550        let offset = self.bump_offset(RelocationSize::QWord);
551        self.entries.push(LitPoolEntry::U64(value));
552        offset
553    }
554
555    /// Encode the relative address of a label into the literal pool (relative to the location in the pool)
556    pub fn push_dynamic(&mut self, id: DynamicLabel, size: RelocationSize) -> isize {
557        let offset = self.bump_offset(size);
558        self.entries.push(LitPoolEntry::Dynamic(size, id));
559        offset
560    }
561
562    /// Encode the relative address of a label into the literal pool (relative to the location in the pool)
563    pub fn push_global(&mut self, name: &'static str, size: RelocationSize) -> isize {
564        let offset = self.bump_offset(size);
565        self.entries.push(LitPoolEntry::Global(size, name));
566        offset
567    }
568
569    /// Encode the relative address of a label into the literal pool (relative to the location in the pool)
570    pub fn push_forward(&mut self, name: &'static str, size: RelocationSize) -> isize {
571        let offset = self.bump_offset(size);
572        self.entries.push(LitPoolEntry::Forward(size, name));
573        offset
574    }
575
576    /// Encode the relative address of a label into the literal pool (relative to the location in the pool)
577    pub fn push_backward(&mut self, name: &'static str, size: RelocationSize) -> isize {
578        let offset = self.bump_offset(size);
579        self.entries.push(LitPoolEntry::Backward(size, name));
580        offset
581    }
582
583    fn pad_sized<D: DynasmLabelApi>(size: RelocationSize, assembler: &mut D) {
584        match size {
585            RelocationSize::Byte => assembler.push(0),
586            RelocationSize::Word => assembler.push_u16(0),
587            RelocationSize::DWord => assembler.push_u32(0),
588            RelocationSize::QWord => assembler.push_u64(0),
589        }
590    }
591
592    /// Emit this literal pool into the specified assembler
593    pub fn emit<D: DynasmLabelApi>(self, assembler: &mut D) {
594        for entry in self.entries {
595            match entry {
596                LitPoolEntry::U8(value) => assembler.push(value),
597                LitPoolEntry::U16(value) => assembler.push_u16(value),
598                LitPoolEntry::U32(value) => assembler.push_u32(value),
599                LitPoolEntry::U64(value) => assembler.push_u64(value),
600                LitPoolEntry::Dynamic(size, id) => {
601                    Self::pad_sized(size, assembler);
602                    assembler.dynamic_relocation(id, 0, size as u8, size as u8, D::Relocation::from_size(size));
603                },
604                LitPoolEntry::Global(size, name) => {
605                    Self::pad_sized(size, assembler);
606                    assembler.global_relocation(name, 0, size as u8, size as u8, D::Relocation::from_size(size));
607                },
608                LitPoolEntry::Forward(size, name) => {
609                    Self::pad_sized(size, assembler);
610                    assembler.forward_relocation(name, 0, size as u8, size as u8, D::Relocation::from_size(size));
611                },
612                LitPoolEntry::Backward(size, name) => {
613                    Self::pad_sized(size, assembler);
614                    assembler.backward_relocation(name, 0, size as u8, size as u8, D::Relocation::from_size(size));
615                },
616                LitPoolEntry::Align(with, alignment) => assembler.align(alignment, with),
617            }
618        }
619    }
620}
621
#[cfg(test)]
mod tests {
    use crate::*;
    use relocations::RelocationSize;

    #[test]
    fn test_litpool_size() {
        test_litpool::<RelocationSize>();
    }

    #[test]
    fn test_litpool_x64() {
        test_litpool::<x64::X64Relocation>();
    }

    #[test]
    fn test_litpool_x86() {
        test_litpool::<x86::X86Relocation>();
    }

    #[test]
    fn test_litpool_aarch64() {
        test_litpool::<aarch64::Aarch64Relocation>();
    }

    // Exercises LitPool end-to-end against a real assembler for the given relocation
    // type: pushes constants and label references, emits the pool, and checks both the
    // returned offsets (which include alignment padding) and the final encoded bytes.
    fn test_litpool<R: Relocation + Debug>() {
        let mut ops = Assembler::<R>::new().unwrap();
        let dynamic1 = ops.new_dynamic_label();

        let mut pool = components::LitPool::new();

        // defined at offset 0; the pool's backward reference resolves to here
        ops.local_label("backward1");

        assert_eq!(pool.push_u8(0x12), 0);
        assert_eq!(pool.push_u8(0x34), 1);
        assert_eq!(pool.push_u8(0x56), 2);

        // u16 values are 2-aligned: one zero padding byte is inserted after offset 3
        assert_eq!(pool.push_u16(0x789A), 4);

        // u32 values are 4-aligned: two zero padding bytes are inserted
        assert_eq!(pool.push_u32(0xBCDE_F012), 8);

        // u64 values are 8-aligned: four zero padding bytes are inserted
        assert_eq!(pool.push_u64(0x3456_789A_BCDE_F012), 16);

        assert_eq!(pool.push_forward("forward1", RelocationSize::Byte), 24);

        // explicit alignment request, padded with 0xCC bytes
        pool.align(4, 0xCC);

        assert_eq!(pool.push_global("global1", RelocationSize::Word), 28);

        assert_eq!(pool.push_dynamic(dynamic1, RelocationSize::DWord), 32);

        assert_eq!(pool.push_backward("backward1", RelocationSize::QWord), 40);

        pool.emit(&mut ops);

        // total encoded pool size: 40 + 8 bytes for the final qword entry
        assert_eq!(ops.offset().0, 48);

        // define the remaining targets directly after the pool (offset 48)
        ops.local_label("forward1");
        ops.global_label("global1");
        ops.dynamic_label(dynamic1);

        assert_eq!(ops.commit(), Ok(()));
        let buf = ops.finalize().unwrap();

        // relocation fields hold little-endian displacements from the field start to
        // offset 48 (e.g. 48 - 24 = 24), and the backward reference to offset 0 from
        // offset 40 encodes -40 = 0xFFFF_FFFF_FFFF_FFD8
        assert_eq!(&*buf, &[
            0x12, 0x34, 0x56, 0x00, 0x9A, 0x78, 0x00, 0x00,
            0x12, 0xF0, 0xDE, 0xBC, 0x00, 0x00, 0x00, 0x00,
            0x12, 0xF0, 0xDE, 0xBC, 0x9A, 0x78, 0x56, 0x34,
            24  , 0xCC, 0xCC, 0xCC, 20  , 0   , 0x00, 0x00,
            16  , 0   , 0   , 0   , 0x00, 0x00, 0x00, 0x00,
            0xD8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFFu8,
        ] as &[u8]);
    }
}