Diffstat (limited to 'linden/indra/libgcrypt/libgcrypt-1.2.2/mpi/alpha/mpih-mul2.S')
 linden/indra/libgcrypt/libgcrypt-1.2.2/mpi/alpha/mpih-mul2.S (mode -rwxr-xr-x -> -rw-r--r--) | 194
 1 file changed, 97 insertions(+), 97 deletions(-)
diff --git a/linden/indra/libgcrypt/libgcrypt-1.2.2/mpi/alpha/mpih-mul2.S b/linden/indra/libgcrypt/libgcrypt-1.2.2/mpi/alpha/mpih-mul2.S
old mode 100755
new mode 100644
index 5e5ce5a..5eb6b98
--- a/linden/indra/libgcrypt/libgcrypt-1.2.2/mpi/alpha/mpih-mul2.S
+++ b/linden/indra/libgcrypt/libgcrypt-1.2.2/mpi/alpha/mpih-mul2.S
@@ -1,97 +1,97 @@
/* Alpha 21064 addmul_1 -- Multiply a limb vector with a limb and add
 * the result to a second limb vector.
 *
 * Copyright (C) 1992, 1994, 1995, 1998,
 *               2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */


/*******************
 * mpi_limb_t
 * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr,    (r16)
 *                      mpi_ptr_t s1_ptr,     (r17)
 *                      mpi_size_t s1_size,   (r18)
 *                      mpi_limb_t s2_limb)   (r19)
 *
 * This code runs at 42 cycles/limb on EV4 and 18 cycles/limb on EV5.
 */


	.set	noreorder
	.set	noat
.text
	.align	3
	.globl	_gcry_mpih_addmul_1
	.ent	_gcry_mpih_addmul_1 2
_gcry_mpih_addmul_1:
	.frame	$30,0,$26

	ldq	$2,0($17)	# $2 = s1_limb
	addq	$17,8,$17	# s1_ptr++
	subq	$18,1,$18	# size--
	mulq	$2,$19,$3	# $3 = prod_low
	ldq	$5,0($16)	# $5 = *res_ptr
	umulh	$2,$19,$0	# $0 = prod_high
	beq	$18,.Lend1	# jump if size was == 1
	ldq	$2,0($17)	# $2 = s1_limb
	addq	$17,8,$17	# s1_ptr++
	subq	$18,1,$18	# size--
	addq	$5,$3,$3
	cmpult	$3,$5,$4
	stq	$3,0($16)
	addq	$16,8,$16	# res_ptr++
	beq	$18,.Lend2	# jump if size was == 2

	.align	3
.Loop:	mulq	$2,$19,$3	# $3 = prod_low
	ldq	$5,0($16)	# $5 = *res_ptr
	addq	$4,$0,$0	# cy_limb = cy_limb + 'cy'
	subq	$18,1,$18	# size--
	umulh	$2,$19,$4	# $4 = cy_limb
	ldq	$2,0($17)	# $2 = s1_limb
	addq	$17,8,$17	# s1_ptr++
	addq	$3,$0,$3	# $3 = cy_limb + prod_low
	cmpult	$3,$0,$0	# $0 = carry from (cy_limb + prod_low)
	addq	$5,$3,$3
	cmpult	$3,$5,$5
	stq	$3,0($16)
	addq	$16,8,$16	# res_ptr++
	addq	$5,$0,$0	# combine carries
	bne	$18,.Loop

.Lend2:	mulq	$2,$19,$3	# $3 = prod_low
	ldq	$5,0($16)	# $5 = *res_ptr
	addq	$4,$0,$0	# cy_limb = cy_limb + 'cy'
	umulh	$2,$19,$4	# $4 = cy_limb
	addq	$3,$0,$3	# $3 = cy_limb + prod_low
	cmpult	$3,$0,$0	# $0 = carry from (cy_limb + prod_low)
	addq	$5,$3,$3
	cmpult	$3,$5,$5
	stq	$3,0($16)
	addq	$5,$0,$0	# combine carries
	addq	$4,$0,$0	# cy_limb = prod_high + cy
	ret	$31,($26),1
.Lend1:	addq	$5,$3,$3
	cmpult	$3,$5,$5
	stq	$3,0($16)
	addq	$0,$5,$0
	ret	$31,($26),1

	.end	_gcry_mpih_addmul_1
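
In MPI terms the routine computes res_ptr[0..s1_size-1] += s1_ptr[0..s1_size-1] * s2_limb and returns the final carry limb; since Alpha has no carry flag, every carry is recovered with an unsigned compare (cmpult) against one of the addends. A minimal C sketch of the same operation, assuming a 64-bit limb, an available __uint128_t type, and the hypothetical names umul_hi and mpih_addmul_1_ref (none of which appear in the file above), could look like this:

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t limb_t;

    /* Stand-in for the umulh instruction: high 64 bits of the 64x64 product.
     * Assumes the compiler provides __uint128_t. */
    static limb_t umul_hi (limb_t a, limb_t b)
    {
      return (limb_t) (((__uint128_t) a * b) >> 64);
    }

    /* Reference sketch of what _gcry_mpih_addmul_1 computes:
     * res[0..size-1] += s1[0..size-1] * s2_limb, returning the carry-out limb. */
    static limb_t
    mpih_addmul_1_ref (limb_t *res, const limb_t *s1, size_t size, limb_t s2_limb)
    {
      limb_t cy = 0;

      for (size_t i = 0; i < size; i++)
        {
          limb_t prod_low  = s1[i] * s2_limb;          /* mulq  */
          limb_t prod_high = umul_hi (s1[i], s2_limb); /* umulh */

          prod_low  += cy;                    /* add carry from previous limb */
          prod_high += (prod_low < cy);       /* cmpult-style carry detection */

          prod_low  += res[i];                /* add the existing result limb */
          prod_high += (prod_low < res[i]);   /* second carry, again by compare */

          res[i] = prod_low;
          cy = prod_high;
        }

      return cy;
    }

The two cmpult results folded into $0 inside .Loop correspond to the two (prod_low < ...) comparisons in the sketch; adding both carries to prod_high cannot overflow a limb, because res[i] + cy + s1[i]*s2_limb always fits in two limbs.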