// What this routine appears to do: copy a length-prefixed payload (16-bit
// length at a1+12, data at a1+14) into a2, transform it in place with a
// keystream seeded from the 32-bit value at a1+8, NUL-terminate the result,
// and return the payload length.
__int64 __fastcall sub_7FF620CE22E0(__int64 a1, _BYTE *a2)
{
  _BYTE *v2; // r13
  __int64 v3; // rdi
  unsigned __int64 v4; // r11
  __int64 v5; // r15
  __int64 v6; // r14
  unsigned __int64 v7; // r8
  unsigned int v8; // ebp
  unsigned __int64 v9; // rbx
  _QWORD *v10; // r10
  unsigned int v11; // er11
  unsigned __int64 v12; // rdx
  unsigned __int64 v13; // r9
  __int64 v14; // rcx
  unsigned __int64 v15; // rdx
  unsigned __int64 v16; // rcx
  unsigned __int64 v17; // rdx
  unsigned __int64 v18; // r9
  __int64 v19; // rdx
  unsigned __int8 *v20; // rcx
  __int64 v21; // rax
  __int64 v22; // rcx
  __int64 v23; // rax
  unsigned __int64 v24; // r11
  unsigned __int64 v25; // r14
  unsigned __int64 v26; // rbx
  _DWORD *v27; // r10
  int v28; // er11
  unsigned int v29; // edx
  unsigned int v30; // ecx
  unsigned int v31; // edx
  unsigned int v32; // er9
  unsigned int v33; // ecx
  unsigned int v34; // edx
  unsigned int v35; // ecx
  unsigned int v36; // edx
  __int64 v37; // rdx
  unsigned __int8 *v38; // rcx
  __int64 v39; // rax
  __int64 v40; // rcx
  __int64 v41; // rax
  unsigned __int64 v42; // r11
  unsigned __int64 v43; // r14
  unsigned __int64 v44; // r12
  char v45; // di
  char v46; // si
  unsigned int v47; // er11
  char v48; // dl
  char v49; // cl
  unsigned __int64 v50; // r11
  unsigned __int64 v51; // r14
  __int64 result; // rax
  __int64 v53; // [rsp+50h] [rbp+8h]

  v53 = a1;
  v2 = a2;
  v3 = a1;
  j_memcpy_0(a2, (const void *)(a1 + 14), *(unsigned __int16 *)(a1 + 12));
  v4 = *(unsigned int *)(v3 + 8);
  v5 = 0i64;
  // Derive the two 64-bit keystream words (v6, v7) from the 32-bit value at a1+8;
  // 2685821657736338717 (0x2545F4914F6CDD1D) is the xorshift64* multiplier.
  v6 = 2685821657736338717i64
     * (v4 ^ (unsigned int)(*(_DWORD *)(v3 + 8) << 25) ^ ((v4 ^ (v4 >> 15)) >> 12));
  v7 = 2685821657736338717i64
     * ((unsigned int)(v4 - 111492228)
      ^ (unsigned int)(((_DWORD)v4 - 4) << 25)
      ^ (((unsigned int)(v4 - 111492228) ^ ((unsigned __int64)(unsigned int)(v4 - 111492228) >> 15)) >> 12));
  v8 = 8 * *(unsigned __int16 *)(v3 + 12) + 1;  // remaining work, counted in bits
  if ( v8 >= 0x40 )
  {
    // 8-byte blocks: bit permutation via the 0x5555/0x3333/0x0F0F mask cascades,
    // byte substitution through the table at _ImageBase + 63173072, and an XOR
    // with the per-block key word v11.
    v9 = (unsigned __int64)v8 >> 6;
    do
    {
      v10 = (_QWORD *)&v2[(unsigned int)v5];
      v11 = __ROR4__(__ROR4__(v4, 1), 1);
      v12 = (2i64 * ~*v10) ^ ((2i64 * ~*v10) ^ (~*v10 >> 1)) & 0x5555555555555555i64;
      v13 = (16 * ((4 * v12) ^ ((4 * v12) ^ (v12 >> 2)) & 0x3333333333333333i64))
          ^ ((16 * ((4 * v12) ^ ((4 * v12) ^ (v12 >> 2)) & 0x3333333333333333i64))
           ^ (((4 * v12) ^ ((4 * v12) ^ (v12 >> 2)) & 0x3333333333333333i64) >> 4)) & 0xF0F0F0F0F0F0F0Fi64;
      v14 = __ROL8__((v13 << 8) ^ ((v13 << 8) ^ (v13 >> 8)) & 0xFF00FF00FF00FFi64, 32);
      v15 = (2 * (v11 + v14)) ^ ((2 * (v11 + v14)) ^ (((unsigned __int64)v11 + v14) >> 1)) & 0x5555555555555555i64;
      v16 = (2 * v15) ^ ((2 * v15) ^ (v15 >> 1)) & 0x5555555555555555i64;
      v17 = (4 * v16) ^ ((4 * v16) ^ (v16 >> 2)) & 0x3333333333333333i64;
      v18 = (16 * v17) ^ ((16 * v17) ^ (v17 >> 4)) & 0xF0F0F0F0F0F0F0Fi64;
      v19 = 8i64;
      *v10 = __ROL8__((v18 << 8) ^ ((v18 << 8) ^ (v18 >> 8)) & 0xFF00FF00FF00FFi64, 32);
      // First substitution pass over the 8 bytes just written.
      v20 = &v2[(unsigned int)v5];
      do
      {
        v21 = *v20++;
        *(v20 - 1) = *((_BYTE *)&_ImageBase + v21 + 63173072);
        --v19;
      }
      while ( v19 );
      v22 = 8i64;
      *v10 ^= ~(unsigned __int64)v11;
      // Second substitution pass after XORing in the inverted key word.
      do
      {
        v23 = *(unsigned __int8 *)v10;
        v10 = (_QWORD *)((char *)v10 + 1);
        *((_BYTE *)v10 - 1) = *((_BYTE *)&_ImageBase + v23 + 63173072);
        --v22;
      }
      while ( v22 );
      // Advance the keystream; this matches a xoroshiro128+ step with s[0] = v7,
      // s[1] = v6, and the high half of s[0] + s[1] becoming the next key word.
      v24 = v6 + v7;
      v5 = (unsigned int)(v5 + 8);
      v25 = v7 ^ v6;
      v4 = v24 >> 32;
      v8 -= 64;
      v7 = v25 ^ (v25 << 14) ^ __ROR8__(v7, 9);
      v6 = __ROR8__(v25, 28);
      --v9;
    }
    while ( v9 );
    v3 = v53;
  }
  if ( v8 >= 0x20 )
  {
    // Same scheme on 4-byte blocks, with 32-bit masks and the table at
    // _ImageBase + 63172816.
    v26 = (unsigned __int64)v8 >> 5;
    do
    {
      v27 = (_DWORD *)&v2[(unsigned int)v5];
      v28 = __ROL4__(__ROL4__(v4, 1), 1);
      v29 = (2 * ~*v27) ^ ((2 * ~*v27) ^ ((unsigned int)~*v27 >> 1)) & 0x55555555;
      v30 = (4 * v29) ^ ((4 * v29) ^ (v29 >> 2)) & 0x33333333;
      v31 = (16 * v30) ^ ((16 * v30) ^ (v30 >> 4)) & 0xF0F0F0F;
      v32 = __ROL4__((v31 << 8) ^ ((v31 << 8) ^ (v31 >> 8)) & 0xFF00FF, 16) + v28;
      v33 = (2 * v32) ^ ((2 * v32) ^ (v32 >> 1)) & 0x55555555;
      v34 = (2 * v33) ^ ((2 * v33) ^ (v33 >> 1)) & 0x55555555;
      v35 = (4 * v34) ^ ((4 * v34) ^ (v34 >> 2)) & 0x33333333;
      v36 = (16 * v35) ^ ((16 * v35) ^ (v35 >> 4)) & 0xF0F0F0F;
      *v27 = __ROL4__((v36 << 8) ^ ((v36 << 8) ^ (v36 >> 8)) & 0xFF00FF, 16);
      v37 = 4i64;
      v38 = &v2[(unsigned int)v5];
      do
      {
        v39 = *v38++;
        *(v38 - 1) = *((_BYTE *)&_ImageBase + v39 + 63172816);
        --v37;
      }
      while ( v37 );
      *v27 ^= v28;
      v40 = 4i64;
      do
      {
        v41 = *(unsigned __int8 *)v27;
        v27 = (_DWORD *)((char *)v27 + 1);
        *((_BYTE *)v27 - 1) = *((_BYTE *)&_ImageBase + v41 + 63172816);
        --v40;
      }
      while ( v40 );
      v42 = v6 + v7;                            // keystream step, as above
      v5 = (unsigned int)(v5 + 4);
      v43 = v7 ^ v6;
      v4 = v42 >> 32;
      v8 -= 32;
      v7 = v43 ^ (v43 << 14) ^ __ROR8__(v7, 9);
      v6 = __ROR8__(v43, 28);
      --v26;
    }
    while ( v26 );
  }
  if ( v8 >= 8 )
  {
    // Single-byte tail: key-dependent rotate amounts (x % 7 + 1, so 1..7) and
    // small multiples of the key word folded into each byte.
    v44 = (unsigned __int64)v8 >> 3;
    do
    {
      v45 = 11 * v4;
      v8 -= 8;
      v46 = 51 * v4;
      v47 = 14641 * v4;
      v48 = __ROR1__(~__ROL1__(v2[v5] + 49 * v47, 1331 * v47 % 7 + 1), 11 * v47 % 7 + 1);
      v49 = v47 % 7 + 1;
      v50 = v6 + v7;                            // keystream step, as above
      v51 = v7 ^ v6;
      v4 = v50 >> 32;
      v7 = v51 ^ (v51 << 14) ^ __ROR8__(v7, 9);
      v2[v5] = __ROL1__(v48, v49) - v45 - v46;
      v6 = __ROR8__(v51, 28);
      v5 = (unsigned int)(v5 + 1);
      --v44;
    }
    while ( v44 );
    v3 = v53;
  }
  // v8 was seeded with 8 * length + 1, so one bit of work is always left over:
  // fold the low key bit into the byte just past the processed data.
  if ( v8 )
    v2[v5] ^= v4 & 1;
  result = *(unsigned __int16 *)(v3 + 12);
  v2[result] = 0;                               // NUL-terminate at the original length
  return result;
}
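
Two building blocks recur throughout the listing and are easier to read in isolation. The 0x5555.../0x3333.../0x0F0F.../0x00FF00FF... mask-and-shift cascades are the swap steps used by the classic bit-reversal idiom (the binary composes the same swaps in its own order and mixes a key word in between), and the (v6, v7) update at the end of every block lines up term for term with the xoroshiro128+ step, with v7 in the role of s[0] and v6 in the role of s[1]. The sketch below shows both primitives in canonical form; it is an illustration under those assumptions, not code recovered from the binary, and the names rotl64, reverse_bits64 and keystream_next are placeholders of ours.

#include <stdint.h>

/* Rotate left by r bits (valid for r in 1..63). */
static inline uint64_t rotl64(uint64_t x, unsigned r)
{
    return (x << r) | (x >> (64 - r));
}

/* Canonical 64-bit bit reversal, built from the same swap steps (0x5555...,
 * 0x3333..., 0x0F0F..., 0x00FF00FF... masks) that the 8-byte branch above
 * uses; the binary orders the swaps differently and interleaves key material. */
static uint64_t reverse_bits64(uint64_t x)
{
    x = ((x >> 1)  & 0x5555555555555555ULL) | ((x & 0x5555555555555555ULL) << 1);
    x = ((x >> 2)  & 0x3333333333333333ULL) | ((x & 0x3333333333333333ULL) << 2);
    x = ((x >> 4)  & 0x0F0F0F0F0F0F0F0FULL) | ((x & 0x0F0F0F0F0F0F0F0FULL) << 4);
    x = ((x >> 8)  & 0x00FF00FF00FF00FFULL) | ((x & 0x00FF00FF00FF00FFULL) << 8);
    x = ((x >> 16) & 0x0000FFFF0000FFFFULL) | ((x & 0x0000FFFF0000FFFFULL) << 16);
    return (x >> 32) | (x << 32);
}

/* xoroshiro128+ step: returns s[0] + s[1] and advances the state.  In the
 * listing, v4 (the per-block key word) is the high 32 bits of this sum. */
static uint64_t keystream_next(uint64_t s[2])
{
    uint64_t s0  = s[0];
    uint64_t s1  = s[1];
    uint64_t sum = s0 + s1;

    s1 ^= s0;
    s[0] = rotl64(s0, 55) ^ s1 ^ (s1 << 14);    /* matches v7's update (ror 9 == rol 55) */
    s[1] = rotl64(s1, 36);                      /* matches v6's update (ror 28 == rol 36) */
    return sum;
}

Read with those helpers in mind, each 8-byte pass is roughly: invert and bit-permute the block, add the rotated key word and permute again, substitute every byte through the table at _ImageBase + 63173072, XOR in the inverted key word, substitute again, then advance the keystream; the 4-byte branch repeats the same shape with 32-bit masks and the table at _ImageBase + 63172816.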