diff --git a/.gitignore b/.gitignore index e3ca1255d6abe4850c88b2deeca7f1c0cbedf55a..7c0e140fa5cfdbc99348164938983fc58cdda580 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,11 @@ logical/CortexA53_1 +logical/GIC400 logical/sie300 -logical/shared \ No newline at end of file +logical/SMC +logical/shared +logical/nic400_megasoc_main + +software/build + +.ecmproject +.project \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..0f116cb0158facbca97929d6e3f2b3b7fb38504b --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "logical/sl_ahb_qspi"] + path = logical/sl_ahb_qspi + url = https://git.soton.ac.uk/soclabs/ahb_qspi.git diff --git a/README.md b/README.md index d9e1ce85906ad3ba83d967685c1fbfb2e52ed827..923e349b84e03cf457e2377a75fa16854ad364f1 100644 --- a/README.md +++ b/README.md @@ -1,93 +1,33 @@ # MegaSoC Tech +Part of the MegaSoC SoC reference design. -## Getting started +## Pre-requisites +You will need the following IP from Arm, all available through Arm Academic Access (AAA) -To make it easy for you to get started with GitLab, here's a list of recommended next steps. +- Cortex A55 +- NIC400 -Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)! +You will also need installed +- Socrates +- A simulator (e.g. QuestaSim, VCS) -## Add your files +For FPGA development you will need a Vivado installation -- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files -- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command: - -``` -cd existing_repo -git remote add origin https://git.soton.ac.uk/soclabs/megasoc_tech.git -git branch -M main -git push -uf origin main -``` - -## Integrate with your tools - -- [ ] [Set up project integrations](https://git.soton.ac.uk/soclabs/megasoc_tech/-/settings/integrations) - -## Collaborate with your team - -- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/) -- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html) -- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) -- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) -- [ ] [Set auto-merge](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html) - -## Test and Deploy - -Use the built-in continuous integration in GitLab. 
- -- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html) -- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) -- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) -- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/) -- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html) - -*** - -# Editing this README - -When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thanks to [makeareadme.com](https://www.makeareadme.com/) for this template. - -## Suggestions for a good README - -Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information. - -## Name -Choose a self-explaining name for your project. - -## Description -Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors. - -## Badges -On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge. +## Usage -## Visuals -Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method. +## Memory Map -## Installation -Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection. - -## Usage -Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README. ## Support -Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc. - -## Roadmap -If you have ideas for releases in the future, it is a good idea to list them in the README. +For support please go to [soclabs.org](https://soclabs.org/) ## Contributing -State if you are open to contributions and what your requirements are for accepting them. 
-
-For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
-
-You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
+Open to collaborations. If you're interested, please head over to [soclabs.org](https://soclabs.org/) and register your interest in the MegaSoC reference design project.
 
 ## Authors and acknowledgment
-Show your appreciation to those who have contributed to the project.
-
-## License
-For open source projects, say how it is licensed.
+Daniel Newbrook (d.newbrook@soton.ac.uk)
 
 ## Project status
-If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
+Under Development
\ No newline at end of file
diff --git a/flist/IP/AHB_SRAM.flist b/flist/IP/AHB_SRAM.flist
new file mode 100644
index 0000000000000000000000000000000000000000..e51da72b577d378553ac13937c2c40ecbf4548be
--- /dev/null
+++ b/flist/IP/AHB_SRAM.flist
@@ -0,0 +1,6 @@
+
+
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/ahb_SRAM/sl_ahb_sram.v
+$(ARM_IP_LIBRARY_PATH)/Corstone-101/BP210-r1p1-00rel0/BP210-BU-00000-r1p1-00rel0/logical/models/memories/cmsdk_fpga_sram.v
+$(ARM_IP_LIBRARY_PATH)/Corstone-101/BP210-r1p1-00rel0/BP210-BU-00000-r1p1-00rel0/logical/cmsdk_ahb_to_sram/verilog/cmsdk_ahb_to_sram.v
+// $(ARM_IP_LIBRARY_PATH)/latest/Corstone-101/logical/models/memories/cmsdk_ahb_ram_beh.v
diff --git a/flist/IP/AHB_SRAM_BEHAV.flist b/flist/IP/AHB_SRAM_BEHAV.flist
new file mode 100644
index 0000000000000000000000000000000000000000..e52ffd76861f82244e34f1b2a7f63de57e80d9b5
--- /dev/null
+++ b/flist/IP/AHB_SRAM_BEHAV.flist
@@ -0,0 +1,4 @@
+
+
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/ahb_SRAM/behavioural/sl_ahb_sram.v
+$(ARM_IP_LIBRARY_PATH)/latest/Corstone-101/logical/models/memories/cmsdk_ahb_ram_beh.v
diff --git a/flist/IP/ARM_Cortex_A53.flist b/flist/IP/ARM_Cortex_A53.flist
new file mode 100644
index 0000000000000000000000000000000000000000..f368fbe6eabc1b30b3ced5474a4cd68c28abc04e
--- /dev/null
+++ b/flist/IP/ARM_Cortex_A53.flist
@@ -0,0 +1,365 @@
+//-----------------------------------------------------------------------------
+// MegaSoC Arm Cortex A53 Filelist
+// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license.
+// +// Contributors +// +// David Mapstone (d.a.mapstone@soton.ac.uk) +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +// Abstract : Verilog Command File for Arm Cortex A53 IP +//----------------------------------------------------------------------------- + ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53stb/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53univent/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbap/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/daplite/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/daprom/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog + +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_addr_req_arbiter.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_data_read_buffers.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_data_write_buffers.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_dcu_alloc_mngmt.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_devsplit_mngmt.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_dvm_enc.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_linefill_descriptor.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_linefills_mngmt.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53biu/verilog/ca53biu_prefetch_stream_mngmt.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53cti/verilog/ca53cti.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53cti/verilog/ca53cti_apbif.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53cti/verilog/ca53cti_ci.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53cti/verilog/ca53cti_clkgate.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53cti/verilog/ca53cti_mapper.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53cti/verilog/ca53cti_ti.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu_cachearb.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu_cachearb_seq_state.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu_ccbctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu_cp15.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu_dvm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu_ecc_correction.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dcu/verilog/ca53dcu_lspipe.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_unpack_opb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_agu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_iq.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_au.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_iq_dih.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_clz.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_ldst.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_crc32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec_imm_other.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_extract.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec_late_neon.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_extract_64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_div.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_gen_sat.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_mac.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_lu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_div_csa.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_mask_imm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_maskgen.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_div_quot.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_masksel.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_ld.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_rbit.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_early_exception.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_rbit_64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_etmif.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_sat_dbl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_lu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_sbitx.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_perm_ctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_shift.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_exception.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_alu_simd_sat.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_br.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_cp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_permutation.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_cpsr.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_polymul.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_ctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_ctl_reg_aa32_aa64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_clz54.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_ctl_reg_trans.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_clz64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_ctl_regexpand.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_reduce.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dbg.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_de.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_shift.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_de_pc.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_div.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_de_reg_extract.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_dp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_de_reg_trans.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_mul.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_de_regexpand.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_shift8.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_de_rp_dec.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_shift_sat.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec0_br.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_swap_max.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec0_dp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_vector_maxmin.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec0_ls.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_neon_vrec_est.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec0_neon.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec0_other.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_pmu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec1.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_psr_regfile.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec1_br.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_mul_array.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec1_late_neon.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_regbank.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec1_ls.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_search_rl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec1_neon.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_regbank.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec_forceop.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_special.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec_imm_dp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_store.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_dec_imm_ls.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_alu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_shift7.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_alu_denorm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_unpack_opa.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_alu_renorm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_swizzle_load.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_cg.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_swizzle_store.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_clz106.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_utlb_intf_entry.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_fp_clz24.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53dpu/verilog/ca53dpu_utlb_main_entry.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_apb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_clk.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_cmp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_counter.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_derived_res.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_event.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_extin.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_extout.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_fifo.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_fifo_pack.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_fifo_rotate.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_resources.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_rsrc_sel.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_rsrc_sel_pair.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_trace_gen.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_traceout.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53etm/verilog/ca53etm_viewinst.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_arb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_arb_m.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_arb_s.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_cpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_cpu_cpacket.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_cpu_cpo.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_cpu_dpacket.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_cpu_pcpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gic/verilog/ca53gic_cpu_vcpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_apb_bridge.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_apb_dec.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_cpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_cpu_atb_bridge.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_cpu_clk_reset.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_cpu_debug.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_cpu_power.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_cpu_slice.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_cpu_timers.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_ctm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_power.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_romtable.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53gov/verilog/ca53governor_slice.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_cp15.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_ctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_lfb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_lfb_ctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_a32.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_a64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_debug_dec.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_debug_dec_a64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_debug_dec_t32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_slice.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_t16.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_t32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pd_thumb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pdc.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pdc_t16.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pdc_t32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_bpd.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_btac.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_btic.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_crs.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_dec_branch.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_dec_branch_s32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_dec_class.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_dec_d1.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_dec_t16t32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_iq_format_i0.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_iq_format_i1.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_iq_format_ret.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_it_undef.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_pdc_checker.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_throttle.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_utlb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_utlb_intf_entry.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53ifu/verilog/ca53ifu_pf_utlb_main_entry.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_acpslv.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_afb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_buf_age.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_clk.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_config.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_cpuslv.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_dvmcomp.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_l2db.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_lfsr.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_lfsr_arb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_master.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_master_ace.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_master_retries.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_master_skyros.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_ramctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_reqbuf_acp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_reqbuf_cpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_reqbuf_ecc.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_reqbuf_snp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_reqbuf_sync.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_sam.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_snpslv.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_tagctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_tagctl_ecc.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53scu/verilog/ca53scu_victimctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53stb/verilog/ca53stb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53stb/verilog/ca53stb_slot.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_cp_regs.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_dbg.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_dbg_bkpt.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_dbg_wpt.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_lookup.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_pagewalk.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_ramctl.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53tlb/verilog/ca53tlb_remap.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/cortexa53/verilog/ca53_cpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/cortexa53/verilog/ca53_cpu_reset.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/cortexa53/verilog/ca53_l2.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/cortexa53/verilog/ca53_l2noram.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/cortexa53/verilog/ca53_noram.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/cortexa53/verilog/CORTEXA53.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbap/verilog/DAPAPBAP.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbap/verilog/DAPApbApApbIf.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbap/verilog/DAPApbApDapIf.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPAPBMUX.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPApbMuxArbiter.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPApbMuxCLAMP.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPApbMuxClBit0.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPApbMuxClBit1.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPApbMuxClBus0.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPApbMuxSync.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapapbmux/verilog/DAPApbMuxSysSlv.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/daplite/verilog/DAPLITE.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/daplite/verilog/DAPLiteDecoder.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/daplite/verilog/DAPLiteMux.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/daplite/verilog/DAPLiteRomMux.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/daprom/verilog/DAPROM.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPDpApbIfClamp.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPDpApbSync.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPDpClamp0.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPDpEnSync.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPDpIMux.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPDpSync.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPJtagDpProtocol.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPSWJDP.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPSwDpApbIf.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPSwDpProtocol.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPSwDpSync.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/dapswjdp/verilog/DAPSwjWatcher.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/cells/generic/ca53_cell_clkgate.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/cells/generic/ca53_cell_inter_clkgate.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/cells/generic/ca53_cell_sync.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/rams/generic/ca53_caches_tlb_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/rams/generic/ca53_generic_ram.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/rams/generic/ca53_l2_datarams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/rams/generic/ca53_l2_tagrams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/models/rams/generic/ca53scu_l1d_tagrams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gic_ext.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_biu_dpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gic_gov.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_biu_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gicarb_ext.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gov_scu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_biu_scu.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gov_stb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_biu_tlb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gov_tlb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dcu_biu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ifu_biu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dcu_ifu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ifu_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dcu_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ifu_tlb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dcu_stb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_rr_arb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dcu_tlb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_rr_reg_arb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_scu_dcu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_dcu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_scu_ext.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_etm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_scu_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_gic.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_stb_biu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_gov.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_stb_dpu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_ifu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_stb_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_l2rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_repair64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_stb_scu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_scu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_tlb_etm.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_dpu_tlb.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_tlb_rams.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_check32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_check33.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_check64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_fatal32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_fatal33.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_fatal64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_etm_gov.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_generate32.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_generate33.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_generate64.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_repair32.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_ecc_repair33.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gov_biu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gov_dcu.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/shared/verilog/ca53_gov_ifu.v diff --git a/flist/IP/CA53_tarmac.flist b/flist/IP/CA53_tarmac.flist new file mode 100644 index 0000000000000000000000000000000000000000..5a7e84a89947bfcff562a23a1dcdcc59b4bad35c --- /dev/null +++ b/flist/IP/CA53_tarmac.flist @@ -0,0 +1,6 @@ +//+define+CORTEXA53_UNIVENT +//+define+CORTEXA53_UNIVENT_DPI_CAPTURE + + ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53univent/verilog +$(SOCLABS_MEGASOC_TECH_DIR)/logical/CortexA53_1/logical/ca53univent/verilog/ca53_follower.sv \ No newline at end of file diff --git a/flist/IP/Corstone101.flist b/flist/IP/Corstone101.flist new file mode 100644 index 0000000000000000000000000000000000000000..7ab746b4c5de1a6ad97d5114f8174645a4580965 --- /dev/null +++ b/flist/IP/Corstone101.flist @@ -0,0 +1,6 @@ + + +$(ARM_IP_LIBRARY_PATH)/latest/Corstone-101/logical/cmsdk_apb_uart/verilog/cmsdk_apb_uart.v +$(ARM_IP_LIBRARY_PATH)/latest/Corstone-101/logical/cmsdk_apb_slave_mux/verilog/cmsdk_apb_slave_mux.v +$(ARM_IP_LIBRARY_PATH)/latest/Corstone-101/logical/cmsdk_ahb_to_apb/verilog/cmsdk_ahb_to_apb.v +$(ARM_IP_LIBRARY_PATH)/latest/Corstone-101/logical/cmsdk_apb_timer/verilog/cmsdk_apb_timer.v \ No newline at end of file diff --git a/flist/IP/GIC400.flist b/flist/IP/GIC400.flist new file mode 100644 index 0000000000000000000000000000000000000000..5b2213780d5d9cab71c0058ca8f2b0633bc11e2a --- /dev/null +++ b/flist/IP/GIC400.flist @@ -0,0 +1,20 @@ + + ++incdir+$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/GIC400.v + +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_axi_intf_addr_unpack.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_axi_intf_p1_tx_gen.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_axi_intf.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_clk.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_cpu_interface.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_cpu_lowids.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_distributor.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_interfaces.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_priority.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_spi_block.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_vcpu_interface_list_reg.v 
+$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/gic400/verilog/gic400_vcpu_interface.v + +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/models/cells/generic/gic400_inter_clkgate.v +$(ARM_IP_LIBRARY_PATH)/GIC-400/PL490-BU-50000-r0p1-00rel0/gic400/logical/models/cells/generic/gic400_sync.v \ No newline at end of file diff --git a/flist/IP/PL011.flist b/flist/IP/PL011.flist new file mode 100644 index 0000000000000000000000000000000000000000..c15d9f0bbce4932af661e38375d30747e8b21f8d --- /dev/null +++ b/flist/IP/PL011.flist @@ -0,0 +1,25 @@ + + +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/Uart.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartApbif.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartBaudCntr.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartDataStp.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartDMA.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartInterrupt.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartIrDA.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartModem.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartReceive.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartRegBlock.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartRevAnd.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartRXCntl.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartRXFCntl.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartRXFIFO.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartRXParShft.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartRXRegFile.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartSynctoPCLK.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartSynctoUCLK.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartTest.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartTXCntl.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartTXFCntl.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartTXFIFO.v +$(ARM_IP_LIBRARY_PATH)/PL011/PL011-BU-00000-r1p5-00rel0/PL011_VC/uart_pl011/verilog/rtl_source/UartTXRegFile.v diff --git a/flist/IP/SIE300_SRAM_controller.flist b/flist/IP/SIE300_SRAM_controller.flist new file mode 100644 index 0000000000000000000000000000000000000000..323f76b3a1cfb2b997a3e8b80bf70c81f6d54479 --- /dev/null +++ b/flist/IP/SIE300_SRAM_controller.flist @@ -0,0 +1,26 @@ + + +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/models/cells/generic/sie300_arm_and2.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/models/cells/generic/sie300_arm_or2.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/models/cells/generic/sie300_arm_sdff2yrpq.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/models/cells/generic/sie300_arm_xor2.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/shared/verilog/sie300_or_tree/verilog/sie300_or_tree.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/shared/verilog/sie300_sync/verilog/sie300_sync.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_addr_dec.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_arb.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_arq.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_awq.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_axi_mux.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_bq.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_clamp.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_eam.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_fifo.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_fifo_core.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_lpi_ctrl.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_one_hot.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_rbeat.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_resp_gen.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_rq.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_wbeat.sv +$(SOCLABS_MEGASOC_TECH_DIR)/logical/sie300/logical/sie300_axi5_sram_ctrl_1/verilog/sie300_axi5_sram_ctrl_1_wq.sv \ No newline at end of file diff --git a/flist/IP/nic400_megasoc_main.flist b/flist/IP/nic400_megasoc_main.flist new file mode 100644 index 0000000000000000000000000000000000000000..d86f90d581c91a9b1d48ad1d4679cc277f6c26b7 --- /dev/null +++ b/flist/IP/nic400_megasoc_main.flist @@ -0,0 +1,194 @@ +//----------------------------------------------------------------------------- +// MegaSoC NIC400 Main bus Filelist +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. 
+// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +// Abstract : Verilog Command File for NIC400 Main bus IP +//----------------------------------------------------------------------------- +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/nic400/verilog/nic400_megasoc_main.v + +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_DRAM/verilog/nic400_amib_DRAM_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_DRAM/verilog/nic400_amib_DRAM_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_FLASH/verilog/nic400_amib_FLASH_a_gen_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_FLASH/verilog/nic400_amib_FLASH_ahb_m_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_FLASH/verilog/nic400_amib_FLASH_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_FLASH/verilog/nic400_amib_FLASH_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_FLASH/verilog/nic400_amib_FLASH_s_gen_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_GIC/verilog/nic400_amib_GIC_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_GIC/verilog/nic400_amib_GIC_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_RAM/verilog/nic400_amib_RAM_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_RAM/verilog/nic400_amib_RAM_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_ROM/verilog/nic400_amib_ROM_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_ROM/verilog/nic400_amib_ROM_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_apb_group0/verilog/nic400_amib_apb_group0_a_gen_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_apb_group0/verilog/nic400_amib_apb_group0_apb_m_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_apb_group0/verilog/nic400_amib_apb_group0_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_apb_group0/verilog/nic400_amib_apb_group0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/asib_A53/verilog/nic400_asib_A53_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/asib_A53/verilog/nic400_asib_A53_decode_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/asib_A53/verilog/nic400_asib_A53_maskcntl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/asib_A53/verilog/nic400_asib_A53_megasoc_main.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/asib_A53/verilog/nic400_asib_A53_rd_spi_cdas_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/asib_A53/verilog/nic400_asib_A53_wr_spi_cdas_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_add_sel_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_add_sel_ml1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_add_sel_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_maskcntl_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_maskcntl_ml1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_maskcntl_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ml_blayer_0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ml_build_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ml_map_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ml_mlayer_0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ml_mlayer_1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ml_mlayer_2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_rd_spi_tt_s0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_rd_wr_arb_0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_rd_wr_arb_1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_rd_wr_arb_2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ret_sel_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ret_sel_ml1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_ret_sel_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_wr_sel_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_wr_sel_ml1_megasoc_main.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_wr_sel_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog/nic400_bm0_wr_spi_tt_s0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_add_sel_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_add_sel_ml1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_add_sel_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_add_sel_ml3_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_add_sel_ml4_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_maskcntl_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_maskcntl_ml1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_maskcntl_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_maskcntl_ml3_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_maskcntl_ml4_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_blayer_0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_build_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_map_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_mlayer_0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_mlayer_1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_mlayer_2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_mlayer_3_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ml_mlayer_4_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_rd_spi_tt_s0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ret_sel_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ret_sel_ml1_megasoc_main.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ret_sel_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ret_sel_ml3_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_ret_sel_ml4_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_wr_sel_ml0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_wr_sel_ml1_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_wr_sel_ml2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_wr_sel_ml3_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_wr_sel_ml4_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog/nic400_bm1_wr_spi_tt_s0_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_bypass_sync_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_capt_nosync_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_capt_sync_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_comb_and2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_comb_mux2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_comb_or2_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_comb_or3_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_corrupt_gry_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_launch_gry_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog/nic400_cdc_random_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/default_slave_ds_3/verilog/nic400_default_slave_ds_3_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_axi_to_itb_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_itb_addr_fmt_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_rd_cam_slice_megasoc_main.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_rd_chan_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_rd_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_resp_cam_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_wr_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_wr_merge_buffer_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_wr_mux_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_downsize_wr_resp_block_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_itb_to_axi_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_master_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog/nic400_ib_FLASH_ib_slave_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_rd_addr_fmt_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_rd_cam_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_rd_chan_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_rd_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_resp_cam_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_wr_addr_fmt_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_wr_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_wr_merge_buffer_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_wr_mux_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_downsize_wr_resp_block_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_maskcntl_megasoc_main.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_master_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog/nic400_ib_GIC_ib_slave_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_axi_to_itb_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_itb_addr_fmt_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_rd_cam_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_rd_chan_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_rd_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_resp_cam_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_wr_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_wr_merge_buffer_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_wr_mux_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_downsize_wr_resp_block_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_itb_to_axi_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_master_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog/nic400_ib_apb_group0_ib_slave_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_chan_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_rd_addr_fmt_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_rd_cam_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_rd_chan_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_rd_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_resp_cam_slice_megasoc_main.v 
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_wr_addr_fmt_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_wr_cntrl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_wr_merge_buffer_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_wr_mux_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_downsize_wr_resp_block_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_maskcntl_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_master_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog/nic400_ib_ib2_slave_domain_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_ax_reg_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_ax4_reg_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_buf_reg_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_ful_regd_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_fwd_regd_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_rd_reg_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_reg_slice_axi_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_rev_regd_slice_megasoc_main.v +$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog/nic400_wr_reg_slice_megasoc_main.v + ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_DRAM/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_FLASH/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_GIC/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_PERIPHERAL/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_RAM/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_ROM/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/amib_apb_group0/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/asib_A53/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm0/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/busmatrix_bm1/verilog 
++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/cdc_blocks/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/default_slave_ds_3/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_FLASH_ib/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_GIC_ib/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_PERIPHERAL_ib/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_apb_group0_ib/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/ib_ib2/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/reg_slice/verilog ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/nic400/verilog/Axi ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/nic400/verilog/Axi4PC ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/nic400/verilog/Ahb ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/nic400/verilog/AhbPC ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/nic400/verilog/ApbPC ++incdir+$(SOCLABS_MEGASOC_TECH_DIR)/logical/nic400_megasoc_main/logical/nic400_megasoc_main/nic400/verilog/Apb4PC diff --git a/flist/megasoc_tech.flist b/flist/megasoc_tech.flist deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/flist/megasoc_tech_BEHAV.flist b/flist/megasoc_tech_BEHAV.flist new file mode 100644 index 0000000000000000000000000000000000000000..712991e0c7bef266aed6bdea5b2b8b45e1a05459 --- /dev/null +++ b/flist/megasoc_tech_BEHAV.flist @@ -0,0 +1,36 @@ +//----------------------------------------------------------------------------- +// MegaSoC Tech Filelist +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. 
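+//
+// This is the behavioural-memory variant of the MegaSoC Tech filelist: it pulls
+// in the ROM/SRAM wrappers from logical/*/behavioural together with
+// AHB_SRAM_BEHAV.flist. It is normally passed straight to the simulator with -f
+// (e.g. "-f flist/megasoc_tech_BEHAV.flist") with SOCLABS_MEGASOC_TECH_DIR set
+// in the environment; how the $(...) variables are expanded depends on the
+// simulator being used.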
+//
+// Contributors
+//
+// David Mapstone (d.a.mapstone@soton.ac.uk)
+// Daniel Newbrook (d.newbrook@soton.ac.uk)
+//
+// Copyright © 2021-3, SoC Labs (www.soclabs.org)
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+// Abstract : Verilog Command File for MegaSoC Tech IP
+//-----------------------------------------------------------------------------
+
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/top_megasoc_tech/megasoc_tech_wrapper.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_cpu_ss.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_irq_sync.v
+
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/ROM/behavioural/ROM_wrapper.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/SRAM/behavioural/SRAM.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/SRAM/behavioural/SRAM_wrapper.v
+
+
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/megasoc_subsystems/peripheral_subsystem/megasoc_peripheral_subsystem.v
+
+// -f $(SOCLABS_MEGASOC_TECH_DIR)/logical/sl_ahb_qspi/flist/Top/ahb_QSPI_SIM.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/AHB_SRAM_BEHAV.flist
+// ARM IP
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/ARM_Cortex_A53.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/CA53_tarmac.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/GIC400.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/nic400_megasoc_main.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/SIE300_SRAM_controller.flist
+// -f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/PL011.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/Corstone101.flist
\ No newline at end of file
diff --git a/flist/megasoc_tech_FPGA.flist b/flist/megasoc_tech_FPGA.flist
new file mode 100644
index 0000000000000000000000000000000000000000..5b8f6bac5532fc771ec54453b982e7326d8d8952
--- /dev/null
+++ b/flist/megasoc_tech_FPGA.flist
@@ -0,0 +1,34 @@
+//-----------------------------------------------------------------------------
+// MegaSoC Tech Filelist
+// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license.
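+//
+// FPGA variant of the MegaSoC Tech filelist: the ROM and SRAM wrappers listed
+// here instantiate cmsdk_fpga_sram rather than the behavioural SRAM model, and
+// AHB_SRAM.flist is used in place of the AHB_SRAM_BEHAV.flist referenced by
+// megasoc_tech_BEHAV.flist.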
+//
+// Contributors
+//
+// David Mapstone (d.a.mapstone@soton.ac.uk)
+// Daniel Newbrook (d.newbrook@soton.ac.uk)
+//
+// Copyright © 2021-3, SoC Labs (www.soclabs.org)
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+// Abstract : Verilog Command File for MegaSoC Tech IP
+//-----------------------------------------------------------------------------
+
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/top_megasoc_tech/megasoc_tech_wrapper.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_cpu_ss.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_irq_sync.v
+
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/ROM/ROM_wrapper.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/SRAM/logical/SRAM.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/SRAM/logical/SRAM_wrapper.v
+$(SOCLABS_MEGASOC_TECH_DIR)/logical/megasoc_subsystems/peripheral_subsystem/megasoc_peripheral_subsystem.v
+
+// -f $(SOCLABS_MEGASOC_TECH_DIR)/logical/sl_ahb_qspi/flist/Top/ahb_QSPI_SIM.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/AHB_SRAM.flist
+// ARM IP
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/ARM_Cortex_A53.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/CA53_tarmac.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/GIC400.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/nic400_megasoc_main.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/SIE300_SRAM_controller.flist
+// -f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/PL011.flist
+-f $(SOCLABS_MEGASOC_TECH_DIR)/flist/IP/Corstone101.flist
\ No newline at end of file
diff --git a/logical/ROM/ASIC/TSMC16nm/ROM_wrapper.v b/logical/ROM/ASIC/TSMC16nm/ROM_wrapper.v
new file mode 100644
index 0000000000000000000000000000000000000000..c68e6b8447210a4cb16bb7a198e6a419e6986b35
--- /dev/null
+++ b/logical/ROM/ASIC/TSMC16nm/ROM_wrapper.v
@@ -0,0 +1,156 @@
+//-----------------------------------------------------------------------------
+// Expansion Subsystem SRAM Wrapper
+// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license.
+// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Modules instantiated: +// sie300_axi5_sram_ctrl_expansion_subsystem +// SRAM +// +// ToDo +// -replace SRAM with TSMC ROM + +module ROM_wrapper( + input wire ACLK, + input wire ARESETn, + + input wire AWVALID, + output wire AWREADY, + input wire [5:0] AWID, + input wire [31:0] AWADDR, + input wire [7:0] AWLEN, + input wire [2:0] AWSIZE, + input wire [1:0] AWBURST, + input wire AWLOCK, + input wire [2:0] AWPROT, + input wire [3:0] AWQOS, + + input wire WVALID, + output wire WREADY, + input wire [63:0] WDATA, + input wire [7:0] WSTRB, + input wire WLAST, + input wire WPOISON, + + output wire BVALID, + input wire BREADY, + output wire [5:0] BID, + output wire [1:0] BRESP, + + input wire ARVALID, + output wire ARREADY, + input wire [5:0] ARID, + input wire [31:0] ARADDR, + input wire [7:0] ARLEN, + input wire [2:0] ARSIZE, + input wire [1:0] ARBURST, + input wire ARLOCK, + input wire [2:0] ARPROT, + input wire [3:0] ARQOS, + + output wire RVALID, + input wire RREADY, + output wire [5:0] RID, + output wire [63:0] RDATA, + output wire [1:0] RRESP, + output wire RLAST, + output wire RPOISON, + input wire AWAKEUP, + + input wire clk_qreqn, + output wire clk_qacceptn, + output wire clk_qdeny, + output wire clk_qactive, + + input wire pwr_qreqn, + output wire pwr_qacceptn, + output wire pwr_qdeny, + output wire pwr_qactive, + + input wire ext_gt_qreqn, + output wire ext_gt_qacceptn, + input wire cfg_gate_resp +); + + +wire [19:0] memaddr; +wire [64:0] memd; +wire [64:0] memq; +wire memcen; +wire [7:0] memwen; + +sie300_axi5_sram_ctrl_1 u_SMC( + .aclk(ACLK), + .aresetn(ARESETn), + .awvalid_s(AWVALID), + .awready_s(AWREADY), + .awid_s(AWID), + .awaddr_s(AWADDR[19:0]), + .awlen_s(AWLEN), + .awsize_s(AWSIZE), + .awburst_s(AWBURST), + .awlock_s(AWLOCK), + .awprot_s(AWPROT), + .awqos_s(AWQOS), + .wvalid_s(WVALID), + .wready_s(WREADY), + .wdata_s(WDATA), + .wstrb_s(WSTRB), + .wlast_s(WLAST), + .wpoison_s(WPOISON), + .bvalid_s(BVALID), + .bready_s(BREADY), + .bid_s(BID), + .bresp_s(BRESP), + .arvalid_s(ARVALID), + .arready_s(ARREADY), + .arid_s(ARID), + .araddr_s(ARADDR[19:0]), + .arlen_s(ARLEN), + .arsize_s(ARSIZE), + .arburst_s(ARBURST), + .arlock_s(ARLOCK), + .arprot_s(ARPROT), + .arqos_s(ARQOS), + .rvalid_s(RVALID), + .rready_s(RREADY), + .rid_s(RID), + .rdata_s(RDATA), + .rresp_s(RRESP), + .rlast_s(RLAST), + .rpoison_s(RPOISON), + .awakeup_s(AWAKEUP), + .clk_qreqn(clk_qreqn), + .clk_qacceptn(clk_qacceptn), + .clk_qdeny(clk_qdeny), + .clk_qactive(clk_qactive), + .pwr_qreqn(pwr_qreqn), + .pwr_qacceptn(pwr_qacceptn), + .pwr_qdeny(pwr_qdeny), + .pwr_qactive(pwr_qactive), + .ext_gt_qreqn(ext_gt_qreqn), + .ext_gt_qacceptn(ext_gt_qacceptn), + .cfg_gate_resp(cfg_gate_resp), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + +SRAM #(.MEM_DEPTH(1<<14)) u_ROM ( + .clk(ACLK), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + + +endmodule \ No newline at end of file diff --git a/logical/ROM/ROM_wrapper copy.v b/logical/ROM/ROM_wrapper copy.v new file mode 100644 index 0000000000000000000000000000000000000000..9a98e98d88bbb8f4b73d3780cc161ea966c454f8 --- /dev/null +++ b/logical/ROM/ROM_wrapper copy.v @@ -0,0 +1,153 @@ +//----------------------------------------------------------------------------- +// Expansion Subsystem 
SRAM Wrapper +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. +// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Modules instantiated: +// sie300_axi5_sram_ctrl_expansion_subsystem +// SRAM + +module ROM_wrapper( + input wire ACLK, + input wire ARESETn, + + input wire AWVALID, + output wire AWREADY, + input wire [5:0] AWID, + input wire [31:0] AWADDR, + input wire [7:0] AWLEN, + input wire [2:0] AWSIZE, + input wire [1:0] AWBURST, + input wire AWLOCK, + input wire [2:0] AWPROT, + input wire [3:0] AWQOS, + + input wire WVALID, + output wire WREADY, + input wire [63:0] WDATA, + input wire [7:0] WSTRB, + input wire WLAST, + input wire WPOISON, + + output wire BVALID, + input wire BREADY, + output wire [5:0] BID, + output wire [1:0] BRESP, + + input wire ARVALID, + output wire ARREADY, + input wire [5:0] ARID, + input wire [31:0] ARADDR, + input wire [7:0] ARLEN, + input wire [2:0] ARSIZE, + input wire [1:0] ARBURST, + input wire ARLOCK, + input wire [2:0] ARPROT, + input wire [3:0] ARQOS, + + output wire RVALID, + input wire RREADY, + output wire [5:0] RID, + output wire [63:0] RDATA, + output wire [1:0] RRESP, + output wire RLAST, + output wire RPOISON, + input wire AWAKEUP, + + input wire clk_qreqn, + output wire clk_qacceptn, + output wire clk_qdeny, + output wire clk_qactive, + + input wire pwr_qreqn, + output wire pwr_qacceptn, + output wire pwr_qdeny, + output wire pwr_qactive, + + input wire ext_gt_qreqn, + output wire ext_gt_qacceptn, + input wire cfg_gate_resp +); + + +wire [19:0] memaddr; +wire [64:0] memd; +wire [64:0] memq; +wire memcen; +wire [7:0] memwen; + +sie300_axi5_sram_ctrl_1 u_SMC( + .aclk(ACLK), + .aresetn(ARESETn), + .awvalid_s(AWVALID), + .awready_s(AWREADY), + .awid_s(AWID), + .awaddr_s(AWADDR[19:0]), + .awlen_s(AWLEN), + .awsize_s(AWSIZE), + .awburst_s(AWBURST), + .awlock_s(AWLOCK), + .awprot_s(AWPROT), + .awqos_s(AWQOS), + .wvalid_s(WVALID), + .wready_s(WREADY), + .wdata_s(WDATA), + .wstrb_s(WSTRB), + .wlast_s(WLAST), + .wpoison_s(WPOISON), + .bvalid_s(BVALID), + .bready_s(BREADY), + .bid_s(BID), + .bresp_s(BRESP), + .arvalid_s(ARVALID), + .arready_s(ARREADY), + .arid_s(ARID), + .araddr_s(ARADDR[19:0]), + .arlen_s(ARLEN), + .arsize_s(ARSIZE), + .arburst_s(ARBURST), + .arlock_s(ARLOCK), + .arprot_s(ARPROT), + .arqos_s(ARQOS), + .rvalid_s(RVALID), + .rready_s(RREADY), + .rid_s(RID), + .rdata_s(RDATA), + .rresp_s(RRESP), + .rlast_s(RLAST), + .rpoison_s(RPOISON), + .awakeup_s(AWAKEUP), + .clk_qreqn(clk_qreqn), + .clk_qacceptn(clk_qacceptn), + .clk_qdeny(clk_qdeny), + .clk_qactive(clk_qactive), + .pwr_qreqn(pwr_qreqn), + .pwr_qacceptn(pwr_qacceptn), + .pwr_qdeny(pwr_qdeny), + .pwr_qactive(pwr_qactive), + .ext_gt_qreqn(ext_gt_qreqn), + .ext_gt_qacceptn(ext_gt_qacceptn), + .cfg_gate_resp(cfg_gate_resp), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + +SRAM #(.MEM_DEPTH(1<<14)) u_ROM ( + .clk(ACLK), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + + +endmodule \ No newline at end of file diff --git a/logical/ROM/ROM_wrapper.v b/logical/ROM/ROM_wrapper.v new file mode 100644 index 0000000000000000000000000000000000000000..5ca6f54013a761a955bc85d535ad257788bdeb67 --- /dev/null +++ b/logical/ROM/ROM_wrapper.v @@ -0,0 +1,162 @@ 
+//----------------------------------------------------------------------------- +// Expansion Subsystem SRAM Wrapper +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. +// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Modules instantiated: +// sie300_axi5_sram_ctrl_expansion_subsystem +// SRAM + +module ROM_wrapper( + input wire ACLK, + input wire ARESETn, + + input wire AWVALID, + output wire AWREADY, + input wire [5:0] AWID, + input wire [31:0] AWADDR, + input wire [7:0] AWLEN, + input wire [2:0] AWSIZE, + input wire [1:0] AWBURST, + input wire AWLOCK, + input wire [2:0] AWPROT, + input wire [3:0] AWQOS, + + input wire WVALID, + output wire WREADY, + input wire [63:0] WDATA, + input wire [7:0] WSTRB, + input wire WLAST, + input wire WPOISON, + + output wire BVALID, + input wire BREADY, + output wire [5:0] BID, + output wire [1:0] BRESP, + + input wire ARVALID, + output wire ARREADY, + input wire [5:0] ARID, + input wire [31:0] ARADDR, + input wire [7:0] ARLEN, + input wire [2:0] ARSIZE, + input wire [1:0] ARBURST, + input wire ARLOCK, + input wire [2:0] ARPROT, + input wire [3:0] ARQOS, + + output wire RVALID, + input wire RREADY, + output wire [5:0] RID, + output wire [63:0] RDATA, + output wire [1:0] RRESP, + output wire RLAST, + output wire RPOISON, + input wire AWAKEUP, + + input wire clk_qreqn, + output wire clk_qacceptn, + output wire clk_qdeny, + output wire clk_qactive, + + input wire pwr_qreqn, + output wire pwr_qacceptn, + output wire pwr_qdeny, + output wire pwr_qactive, + + input wire ext_gt_qreqn, + output wire ext_gt_qacceptn, + input wire cfg_gate_resp +); + + +wire [19:0] memaddr; +wire [64:0] memd; +wire [64:0] memq; +wire memcen; +wire [7:0] memwen; + +sie300_axi5_sram_ctrl_1 u_SMC( + .aclk(ACLK), + .aresetn(ARESETn), + .awvalid_s(AWVALID), + .awready_s(AWREADY), + .awid_s(AWID), + .awaddr_s(AWADDR[19:0]), + .awlen_s(AWLEN), + .awsize_s(AWSIZE), + .awburst_s(AWBURST), + .awlock_s(AWLOCK), + .awprot_s(AWPROT), + .awqos_s(AWQOS), + .wvalid_s(WVALID), + .wready_s(WREADY), + .wdata_s(WDATA), + .wstrb_s(WSTRB), + .wlast_s(WLAST), + .wpoison_s(WPOISON), + .bvalid_s(BVALID), + .bready_s(BREADY), + .bid_s(BID), + .bresp_s(BRESP), + .arvalid_s(ARVALID), + .arready_s(ARREADY), + .arid_s(ARID), + .araddr_s(ARADDR[19:0]), + .arlen_s(ARLEN), + .arsize_s(ARSIZE), + .arburst_s(ARBURST), + .arlock_s(ARLOCK), + .arprot_s(ARPROT), + .arqos_s(ARQOS), + .rvalid_s(RVALID), + .rready_s(RREADY), + .rid_s(RID), + .rdata_s(RDATA), + .rresp_s(RRESP), + .rlast_s(RLAST), + .rpoison_s(RPOISON), + .awakeup_s(AWAKEUP), + .clk_qreqn(clk_qreqn), + .clk_qacceptn(clk_qacceptn), + .clk_qdeny(clk_qdeny), + .clk_qactive(clk_qactive), + .pwr_qreqn(pwr_qreqn), + .pwr_qacceptn(pwr_qacceptn), + .pwr_qdeny(pwr_qdeny), + .pwr_qactive(pwr_qactive), + .ext_gt_qreqn(ext_gt_qreqn), + .ext_gt_qacceptn(ext_gt_qacceptn), + .cfg_gate_resp(cfg_gate_resp), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + +cmsdk_fpga_sram #(.AW(15)) u_fpga_sram_0( + .CLK(ACLK), + .ADDR(memaddr), + .WDATA(memd[31:0]), + .WREN(memwen[3:0]), + .CS(memcen), + .RDATA(memq[31:0]) +); + +cmsdk_fpga_sram #(.AW(15)) u_fpga_sram_1( + .CLK(ACLK), + .ADDR(memaddr), + .WDATA(memd[63:32]), + .WREN(memwen[7:4]), + .CS(memcen), + .RDATA(memq[63:32]) +); + + +endmodule \ No newline at end of file diff --git 
a/logical/ROM/behavioural/ROM_wrapper.v b/logical/ROM/behavioural/ROM_wrapper.v new file mode 100644 index 0000000000000000000000000000000000000000..9a98e98d88bbb8f4b73d3780cc161ea966c454f8 --- /dev/null +++ b/logical/ROM/behavioural/ROM_wrapper.v @@ -0,0 +1,153 @@ +//----------------------------------------------------------------------------- +// Expansion Subsystem SRAM Wrapper +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. +// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Modules instantiated: +// sie300_axi5_sram_ctrl_expansion_subsystem +// SRAM + +module ROM_wrapper( + input wire ACLK, + input wire ARESETn, + + input wire AWVALID, + output wire AWREADY, + input wire [5:0] AWID, + input wire [31:0] AWADDR, + input wire [7:0] AWLEN, + input wire [2:0] AWSIZE, + input wire [1:0] AWBURST, + input wire AWLOCK, + input wire [2:0] AWPROT, + input wire [3:0] AWQOS, + + input wire WVALID, + output wire WREADY, + input wire [63:0] WDATA, + input wire [7:0] WSTRB, + input wire WLAST, + input wire WPOISON, + + output wire BVALID, + input wire BREADY, + output wire [5:0] BID, + output wire [1:0] BRESP, + + input wire ARVALID, + output wire ARREADY, + input wire [5:0] ARID, + input wire [31:0] ARADDR, + input wire [7:0] ARLEN, + input wire [2:0] ARSIZE, + input wire [1:0] ARBURST, + input wire ARLOCK, + input wire [2:0] ARPROT, + input wire [3:0] ARQOS, + + output wire RVALID, + input wire RREADY, + output wire [5:0] RID, + output wire [63:0] RDATA, + output wire [1:0] RRESP, + output wire RLAST, + output wire RPOISON, + input wire AWAKEUP, + + input wire clk_qreqn, + output wire clk_qacceptn, + output wire clk_qdeny, + output wire clk_qactive, + + input wire pwr_qreqn, + output wire pwr_qacceptn, + output wire pwr_qdeny, + output wire pwr_qactive, + + input wire ext_gt_qreqn, + output wire ext_gt_qacceptn, + input wire cfg_gate_resp +); + + +wire [19:0] memaddr; +wire [64:0] memd; +wire [64:0] memq; +wire memcen; +wire [7:0] memwen; + +sie300_axi5_sram_ctrl_1 u_SMC( + .aclk(ACLK), + .aresetn(ARESETn), + .awvalid_s(AWVALID), + .awready_s(AWREADY), + .awid_s(AWID), + .awaddr_s(AWADDR[19:0]), + .awlen_s(AWLEN), + .awsize_s(AWSIZE), + .awburst_s(AWBURST), + .awlock_s(AWLOCK), + .awprot_s(AWPROT), + .awqos_s(AWQOS), + .wvalid_s(WVALID), + .wready_s(WREADY), + .wdata_s(WDATA), + .wstrb_s(WSTRB), + .wlast_s(WLAST), + .wpoison_s(WPOISON), + .bvalid_s(BVALID), + .bready_s(BREADY), + .bid_s(BID), + .bresp_s(BRESP), + .arvalid_s(ARVALID), + .arready_s(ARREADY), + .arid_s(ARID), + .araddr_s(ARADDR[19:0]), + .arlen_s(ARLEN), + .arsize_s(ARSIZE), + .arburst_s(ARBURST), + .arlock_s(ARLOCK), + .arprot_s(ARPROT), + .arqos_s(ARQOS), + .rvalid_s(RVALID), + .rready_s(RREADY), + .rid_s(RID), + .rdata_s(RDATA), + .rresp_s(RRESP), + .rlast_s(RLAST), + .rpoison_s(RPOISON), + .awakeup_s(AWAKEUP), + .clk_qreqn(clk_qreqn), + .clk_qacceptn(clk_qacceptn), + .clk_qdeny(clk_qdeny), + .clk_qactive(clk_qactive), + .pwr_qreqn(pwr_qreqn), + .pwr_qacceptn(pwr_qacceptn), + .pwr_qdeny(pwr_qdeny), + .pwr_qactive(pwr_qactive), + .ext_gt_qreqn(ext_gt_qreqn), + .ext_gt_qacceptn(ext_gt_qacceptn), + .cfg_gate_resp(cfg_gate_resp), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + +SRAM #(.MEM_DEPTH(1<<14)) u_ROM ( + .clk(ACLK), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + 
.memwen(memwen) +); + + +endmodule \ No newline at end of file diff --git a/logical/ROM/fpga/ROM_wrapper.v b/logical/ROM/fpga/ROM_wrapper.v new file mode 100644 index 0000000000000000000000000000000000000000..5ca6f54013a761a955bc85d535ad257788bdeb67 --- /dev/null +++ b/logical/ROM/fpga/ROM_wrapper.v @@ -0,0 +1,162 @@ +//----------------------------------------------------------------------------- +// Expansion Subsystem SRAM Wrapper +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. +// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Modules instantiated: +// sie300_axi5_sram_ctrl_expansion_subsystem +// SRAM + +module ROM_wrapper( + input wire ACLK, + input wire ARESETn, + + input wire AWVALID, + output wire AWREADY, + input wire [5:0] AWID, + input wire [31:0] AWADDR, + input wire [7:0] AWLEN, + input wire [2:0] AWSIZE, + input wire [1:0] AWBURST, + input wire AWLOCK, + input wire [2:0] AWPROT, + input wire [3:0] AWQOS, + + input wire WVALID, + output wire WREADY, + input wire [63:0] WDATA, + input wire [7:0] WSTRB, + input wire WLAST, + input wire WPOISON, + + output wire BVALID, + input wire BREADY, + output wire [5:0] BID, + output wire [1:0] BRESP, + + input wire ARVALID, + output wire ARREADY, + input wire [5:0] ARID, + input wire [31:0] ARADDR, + input wire [7:0] ARLEN, + input wire [2:0] ARSIZE, + input wire [1:0] ARBURST, + input wire ARLOCK, + input wire [2:0] ARPROT, + input wire [3:0] ARQOS, + + output wire RVALID, + input wire RREADY, + output wire [5:0] RID, + output wire [63:0] RDATA, + output wire [1:0] RRESP, + output wire RLAST, + output wire RPOISON, + input wire AWAKEUP, + + input wire clk_qreqn, + output wire clk_qacceptn, + output wire clk_qdeny, + output wire clk_qactive, + + input wire pwr_qreqn, + output wire pwr_qacceptn, + output wire pwr_qdeny, + output wire pwr_qactive, + + input wire ext_gt_qreqn, + output wire ext_gt_qacceptn, + input wire cfg_gate_resp +); + + +wire [19:0] memaddr; +wire [64:0] memd; +wire [64:0] memq; +wire memcen; +wire [7:0] memwen; + +sie300_axi5_sram_ctrl_1 u_SMC( + .aclk(ACLK), + .aresetn(ARESETn), + .awvalid_s(AWVALID), + .awready_s(AWREADY), + .awid_s(AWID), + .awaddr_s(AWADDR[19:0]), + .awlen_s(AWLEN), + .awsize_s(AWSIZE), + .awburst_s(AWBURST), + .awlock_s(AWLOCK), + .awprot_s(AWPROT), + .awqos_s(AWQOS), + .wvalid_s(WVALID), + .wready_s(WREADY), + .wdata_s(WDATA), + .wstrb_s(WSTRB), + .wlast_s(WLAST), + .wpoison_s(WPOISON), + .bvalid_s(BVALID), + .bready_s(BREADY), + .bid_s(BID), + .bresp_s(BRESP), + .arvalid_s(ARVALID), + .arready_s(ARREADY), + .arid_s(ARID), + .araddr_s(ARADDR[19:0]), + .arlen_s(ARLEN), + .arsize_s(ARSIZE), + .arburst_s(ARBURST), + .arlock_s(ARLOCK), + .arprot_s(ARPROT), + .arqos_s(ARQOS), + .rvalid_s(RVALID), + .rready_s(RREADY), + .rid_s(RID), + .rdata_s(RDATA), + .rresp_s(RRESP), + .rlast_s(RLAST), + .rpoison_s(RPOISON), + .awakeup_s(AWAKEUP), + .clk_qreqn(clk_qreqn), + .clk_qacceptn(clk_qacceptn), + .clk_qdeny(clk_qdeny), + .clk_qactive(clk_qactive), + .pwr_qreqn(pwr_qreqn), + .pwr_qacceptn(pwr_qacceptn), + .pwr_qdeny(pwr_qdeny), + .pwr_qactive(pwr_qactive), + .ext_gt_qreqn(ext_gt_qreqn), + .ext_gt_qacceptn(ext_gt_qacceptn), + .cfg_gate_resp(cfg_gate_resp), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + +cmsdk_fpga_sram #(.AW(15)) u_fpga_sram_0( + .CLK(ACLK), + 
.ADDR(memaddr), + .WDATA(memd[31:0]), + .WREN(memwen[3:0]), + .CS(memcen), + .RDATA(memq[31:0]) +); + +cmsdk_fpga_sram #(.AW(15)) u_fpga_sram_1( + .CLK(ACLK), + .ADDR(memaddr), + .WDATA(memd[63:32]), + .WREN(memwen[7:4]), + .CS(memcen), + .RDATA(memq[63:32]) +); + + +endmodule \ No newline at end of file diff --git a/logical/SRAM/logical/SRAM.v b/logical/SRAM/behavioural/SRAM.v similarity index 89% rename from logical/SRAM/logical/SRAM.v rename to logical/SRAM/behavioural/SRAM.v index e9d7b460d6f62885cb89a32788196f2525c25064..c5b36b2694be683f07a7e0365394248558fcbb3d 100644 --- a/logical/SRAM/logical/SRAM.v +++ b/logical/SRAM/behavioural/SRAM.v @@ -1,17 +1,18 @@ -module SRAM ( +module SRAM #( + parameter MEM_DEPTH = (1<<15) +) ( input wire clk, - input wire [13:0] memaddr, + input wire [19:0] memaddr, input wire [63:0] memd, output wire [63:0] memq, input wire memcen, input wire [7:0] memwen ); - parameter MEM_DEPTH = (1<<10); wire WriteEnable; // Write data update - wire [10:0] Addr; + wire [15:0] Addr; reg [63:0] DataAtAddress; // Current write-data at address reg [63:0] Mask; // Write data-mask reg [63:0] NextData; // Next write-data @@ -19,7 +20,7 @@ module SRAM ( integer i; // Write-strobe loop variable integer j; // Mask-bit loop variable - assign Addr = memaddr[13:4]; + assign Addr = memaddr[19:3]; // ------------- // Memory arrays // ------------- @@ -28,6 +29,15 @@ module SRAM ( reg [63:0] mem [MEM_DEPTH-1:0]; assign WriteEnable = (memwen != {8{1'b1}}) ? 1'b1 : 1'b0; + integer k; + initial + begin + for(k=0;k<MEM_DEPTH;k=k+1) begin + mem[k] = 64'd0; + end + end + + always @ (posedge clk) begin : p_memaccess // Only access the memory when the chip is enabled diff --git a/logical/SRAM/behavioural/SRAM_wrapper.v b/logical/SRAM/behavioural/SRAM_wrapper.v new file mode 100644 index 0000000000000000000000000000000000000000..408cc9e742290b31a5251cae0e4f59e7a6d3b88a --- /dev/null +++ b/logical/SRAM/behavioural/SRAM_wrapper.v @@ -0,0 +1,153 @@ +//----------------------------------------------------------------------------- +// Megasoc SRAM Wrapper +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. 
+// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Modules instantiated: +// sie300_axi5_sram_ctrl_expansion_subsystem +// SRAM + +module SRAM_wrapper( + input wire ACLK, + input wire ARESETn, + + input wire AWVALID, + output wire AWREADY, + input wire [5:0] AWID, + input wire [31:0] AWADDR, + input wire [7:0] AWLEN, + input wire [2:0] AWSIZE, + input wire [1:0] AWBURST, + input wire AWLOCK, + input wire [2:0] AWPROT, + input wire [3:0] AWQOS, + + input wire WVALID, + output wire WREADY, + input wire [63:0] WDATA, + input wire [7:0] WSTRB, + input wire WLAST, + input wire WPOISON, + + output wire BVALID, + input wire BREADY, + output wire [5:0] BID, + output wire [1:0] BRESP, + + input wire ARVALID, + output wire ARREADY, + input wire [5:0] ARID, + input wire [31:0] ARADDR, + input wire [7:0] ARLEN, + input wire [2:0] ARSIZE, + input wire [1:0] ARBURST, + input wire ARLOCK, + input wire [2:0] ARPROT, + input wire [3:0] ARQOS, + + output wire RVALID, + input wire RREADY, + output wire [5:0] RID, + output wire [63:0] RDATA, + output wire [1:0] RRESP, + output wire RLAST, + output wire [1:0] RPOISON, + input wire AWAKEUP, + + input wire clk_qreqn, + output wire clk_qacceptn, + output wire clk_qdeny, + output wire clk_qactive, + + input wire pwr_qreqn, + output wire pwr_qacceptn, + output wire pwr_qdeny, + output wire pwr_qactive, + + input wire ext_gt_qreqn, + output wire ext_gt_qacceptn, + input wire cfg_gate_resp +); + + +wire [19:0] memaddr; +wire [63:0] memd; +wire [63:0] memq; +wire memcen; +wire [7:0] memwen; + +sie300_axi5_sram_ctrl_1 u_SMC( + .aclk(ACLK), + .aresetn(ARESETn), + .awvalid_s(AWVALID), + .awready_s(AWREADY), + .awid_s(AWID), + .awaddr_s(AWADDR[19:0]), + .awlen_s(AWLEN), + .awsize_s(AWSIZE), + .awburst_s(AWBURST), + .awlock_s(AWLOCK), + .awprot_s(AWPROT), + .awqos_s(AWQOS), + .wvalid_s(WVALID), + .wready_s(WREADY), + .wdata_s(WDATA), + .wstrb_s(WSTRB), + .wlast_s(WLAST), + .wpoison_s(WPOISON), + .bvalid_s(BVALID), + .bready_s(BREADY), + .bid_s(BID), + .bresp_s(BRESP), + .arvalid_s(ARVALID), + .arready_s(ARREADY), + .arid_s(ARID), + .araddr_s(ARADDR[19:0]), + .arlen_s(ARLEN), + .arsize_s(ARSIZE), + .arburst_s(ARBURST), + .arlock_s(ARLOCK), + .arprot_s(ARPROT), + .arqos_s(ARQOS), + .rvalid_s(RVALID), + .rready_s(RREADY), + .rid_s(RID), + .rdata_s(RDATA), + .rresp_s(RRESP), + .rlast_s(RLAST), + .rpoison_s(RPOISON), + .awakeup_s(AWAKEUP), + .clk_qreqn(clk_qreqn), + .clk_qacceptn(clk_qacceptn), + .clk_qdeny(clk_qdeny), + .clk_qactive(clk_qactive), + .pwr_qreqn(pwr_qreqn), + .pwr_qacceptn(pwr_qacceptn), + .pwr_qdeny(pwr_qdeny), + .pwr_qactive(pwr_qactive), + .ext_gt_qreqn(ext_gt_qreqn), + .ext_gt_qacceptn(ext_gt_qacceptn), + .cfg_gate_resp(cfg_gate_resp), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + +SRAM u_SRAM( + .clk(ACLK), + .memaddr(memaddr), + .memd(memd), + .memq(memq), + .memcen(memcen), + .memwen(memwen) +); + + +endmodule \ No newline at end of file diff --git a/logical/SRAM/logical/SRAM_wrapper.v b/logical/SRAM/logical/SRAM_wrapper.v index c34c7a5bf6fd669c3e1776ce3009eea1ca9e50f6..22e6267119cfa39ae8d4d6ceddf0b1c4f0bf3aa9 100644 --- a/logical/SRAM/logical/SRAM_wrapper.v +++ b/logical/SRAM/logical/SRAM_wrapper.v @@ -1,5 +1,5 @@ //----------------------------------------------------------------------------- -// Expansion Subsystem SRAM Wrapper +// Megasoc SRAM 
Wrapper // A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. // // Contributors @@ -18,7 +18,7 @@ module SRAM_wrapper( input wire AWVALID, output wire AWREADY, - input wire [3:0] AWID, + input wire [5:0] AWID, input wire [31:0] AWADDR, input wire [7:0] AWLEN, input wire [2:0] AWSIZE, @@ -36,12 +36,12 @@ module SRAM_wrapper( output wire BVALID, input wire BREADY, - output wire [3:0] BID, + output wire [5:0] BID, output wire [1:0] BRESP, input wire ARVALID, output wire ARREADY, - input wire [3:0] ARID, + input wire [5:0] ARID, input wire [31:0] ARADDR, input wire [7:0] ARLEN, input wire [2:0] ARSIZE, @@ -52,7 +52,7 @@ module SRAM_wrapper( output wire RVALID, input wire RREADY, - output wire [3:0] RID, + output wire [5:0] RID, output wire [63:0] RDATA, output wire [1:0] RRESP, output wire RLAST, @@ -75,19 +75,19 @@ module SRAM_wrapper( ); -wire [13:0] memaddr; +wire [19:0] memaddr; wire [63:0] memd; wire [63:0] memq; wire memcen; wire [7:0] memwen; -sie300_axi5_sram_ctrl_expansion_subsystem u_SMC( +sie300_axi5_sram_ctrl_1 u_SMC( .aclk(ACLK), .aresetn(ARESETn), .awvalid_s(AWVALID), .awready_s(AWREADY), .awid_s(AWID), - .awaddr_s(AWADDR[13:0]), + .awaddr_s(AWADDR[19:0]), .awlen_s(AWLEN), .awsize_s(AWSIZE), .awburst_s(AWBURST), @@ -107,7 +107,7 @@ sie300_axi5_sram_ctrl_expansion_subsystem u_SMC( .arvalid_s(ARVALID), .arready_s(ARREADY), .arid_s(ARID), - .araddr_s(ARADDR[13:0]), + .araddr_s(ARADDR[19:0]), .arlen_s(ARLEN), .arsize_s(ARSIZE), .arburst_s(ARBURST), @@ -140,14 +140,22 @@ sie300_axi5_sram_ctrl_expansion_subsystem u_SMC( .memwen(memwen) ); -SRAM u_SRAM( - .clk(ACLK), - .memaddr(memaddr), - .memd(memd), - .memq(memq), - .memcen(memcen), - .memwen(memwen) +cmsdk_fpga_sram #(.AW(19)) u_fpga_sram_0( + .CLK(ACLK), + .ADDR(memaddr), + .WDATA(memd[31:0]), + .WREN(memwen[3:0]), + .CS(memcen), + .RDATA(memq[31:0]) ); +cmsdk_fpga_sram #(.AW(19)) u_fpga_sram_1( + .CLK(ACLK), + .ADDR(memaddr), + .WDATA(memd[63:32]), + .WREN(memwen[7:4]), + .CS(memcen), + .RDATA(memq[63:32]) +); endmodule \ No newline at end of file diff --git a/logical/ahb_SRAM/FPGA/sl_ahb_sram.v b/logical/ahb_SRAM/FPGA/sl_ahb_sram.v new file mode 100644 index 0000000000000000000000000000000000000000..4d2d734670bd6d6e6198782d048ae66a2cfbb077 --- /dev/null +++ b/logical/ahb_SRAM/FPGA/sl_ahb_sram.v @@ -0,0 +1,56 @@ +//----------------------------------------------------------------------------- +// SoCLabs FPGA SRAM Wrapper +// - to be substitued with same name file in filelist when moving to ASIC +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. 
+// +// Contributors +// +// David Mapstone (d.a.mapstone@soton.ac.uk) +// +// Copyright 2021-3, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- + +module sl_ahb_sram #( + // System Parameters + parameter SYS_DATA_W = 32, // System Data Width + parameter RAM_ADDR_W = 21, // Size of SRAM + parameter RAM_DATA_W = 32 // Data Width of RAM +)( + // -------------------------------------------------------------------------- + // Port Definitions + // -------------------------------------------------------------------------- + input wire HCLK, // system bus clock + input wire HRESETn, // system bus reset + input wire HSEL, // AHB peripheral select + input wire HREADY, // AHB ready input + input wire [1:0] HTRANS, // AHB transfer type + input wire [2:0] HSIZE, // AHB hsize + input wire HWRITE, // AHB hwrite + input wire [RAM_ADDR_W-1:0] HADDR, // AHB address bus + input wire [SYS_DATA_W-1:0] HWDATA, // AHB write data bus + output wire HREADYOUT, // AHB ready output to S->M mux + output wire HRESP, // AHB response + output wire [SYS_DATA_W-1:0] HRDATA // AHB read data bus +); + + + // AHB to SRAM Behavioural +cmsdk_ahb_ram_beh #( + .AW(21), + .filename("app_flash.v8-a.hex"), + .WS_N(0), + .WS_S(0)) u_ahb_sram ( + .HCLK(HCLK), // Clock + .HRESETn(HRESETn), // Reset + .HSEL(HSEL), // Device select + .HADDR(HADDR), // Address + .HTRANS(HTRANS), // Transfer control + .HSIZE(HSIZE), // Transfer size + .HWRITE(HWRITE), // Write control + .HWDATA(HWDATA), // Write data + .HREADY(HREADY), // Transfer phase done + .HREADYOUT(HREADYOUT), // Device ready + .HRDATA(HRDATA), // Read data output + .HRESP(HRESP) + ); +endmodule \ No newline at end of file diff --git a/logical/ahb_SRAM/behavioural/sl_ahb_sram.v b/logical/ahb_SRAM/behavioural/sl_ahb_sram.v new file mode 100644 index 0000000000000000000000000000000000000000..bf411e476ecb8c2ada2b2ab073978a69a00838e1 --- /dev/null +++ b/logical/ahb_SRAM/behavioural/sl_ahb_sram.v @@ -0,0 +1,63 @@ +//----------------------------------------------------------------------------- +// SoCLabs FPGA SRAM Wrapper +// - to be substitued with same name file in filelist when moving to ASIC +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. 
+// +// Contributors +// +// David Mapstone (d.a.mapstone@soton.ac.uk) +// +// Copyright 2021-3, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- + +module sl_ahb_sram #( + // System Parameters + parameter SYS_DATA_W = 32, // System Data Width + parameter RAM_ADDR_W = 19, // Size of SRAM + parameter RAM_DATA_W = 32 // Data Width of RAM +)( + // -------------------------------------------------------------------------- + // Port Definitions + // -------------------------------------------------------------------------- + input wire HCLK, // system bus clock + input wire HRESETn, // system bus reset + input wire HSEL, // AHB peripheral select + input wire HREADY, // AHB ready input + input wire [1:0] HTRANS, // AHB transfer type + input wire [2:0] HSIZE, // AHB hsize + input wire HWRITE, // AHB hwrite + input wire [RAM_ADDR_W-1:0] HADDR, // AHB address bus + input wire [SYS_DATA_W-1:0] HWDATA, // AHB write data bus + output wire HREADYOUT, // AHB ready output to S->M mux + output wire HRESP, // AHB response + output wire [SYS_DATA_W-1:0] HRDATA // AHB read data bus +); + + + // AHB to SRAM Behavioural + +wire [31:0] SRAMRDATA; +wire [RAM_ADDR_W-3:0] SRAMADDR; +wire [3:0] SRAMWEN; +wire [31:0] SRAMWDATA; +wire SRAMCS; + +cmsdk_ahb_ram_beh #( + .AW(21), + .filename("app_flash.v8-a.hex"), + .WS_N(0), + .WS_S(0)) u_ahb_sram ( + .HCLK(HCLK), // Clock + .HRESETn(HRESETn), // Reset + .HSEL(HSEL), // Device select + .HADDR(HADDR), // Address + .HTRANS(HTRANS), // Transfer control + .HSIZE(HSIZE), // Transfer size + .HWRITE(HWRITE), // Write control + .HWDATA(HWDATA), // Write data + .HREADY(HREADY), // Transfer phase done + .HREADYOUT(HREADYOUT), // Device ready + .HRDATA(HRDATA), // Read data output + .HRESP(HRESP) + ); +endmodule \ No newline at end of file diff --git a/logical/ahb_SRAM/sl_ahb_sram.v b/logical/ahb_SRAM/sl_ahb_sram.v new file mode 100644 index 0000000000000000000000000000000000000000..ceb55b3c51c96b08c3aaf40fc8591f6f7429bf8d --- /dev/null +++ b/logical/ahb_SRAM/sl_ahb_sram.v @@ -0,0 +1,94 @@ +//----------------------------------------------------------------------------- +// SoCLabs FPGA SRAM Wrapper +// - to be substitued with same name file in filelist when moving to ASIC +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. 
+// +// Contributors +// +// David Mapstone (d.a.mapstone@soton.ac.uk) +// +// Copyright 2021-3, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- + +module sl_ahb_sram #( + // System Parameters + parameter SYS_DATA_W = 32, // System Data Width + parameter RAM_ADDR_W = 19, // Size of SRAM + parameter RAM_DATA_W = 32 // Data Width of RAM +)( + // -------------------------------------------------------------------------- + // Port Definitions + // -------------------------------------------------------------------------- + input wire HCLK, // system bus clock + input wire HRESETn, // system bus reset + input wire HSEL, // AHB peripheral select + input wire HREADY, // AHB ready input + input wire [1:0] HTRANS, // AHB transfer type + input wire [2:0] HSIZE, // AHB hsize + input wire HWRITE, // AHB hwrite + input wire [RAM_ADDR_W-1:0] HADDR, // AHB address bus + input wire [SYS_DATA_W-1:0] HWDATA, // AHB write data bus + output wire HREADYOUT, // AHB ready output to S->M mux + output wire HRESP, // AHB response + output wire [SYS_DATA_W-1:0] HRDATA // AHB read data bus +); + + + // AHB to SRAM Behavioural + +wire [31:0] SRAMRDATA; +wire [RAM_ADDR_W-3:0] SRAMADDR; +wire [3:0] SRAMWEN; +wire [31:0] SRAMWDATA; +wire SRAMCS; + +cmsdk_ahb_to_sram #(.AW(RAM_ADDR_W)) u_ahb_to_sram( + .HCLK(HCLK), // Clock + .HRESETn(HRESETn), // Reset + .HSEL(HSEL), // Device select + .HADDR(HADDR), // Address + .HTRANS(HTRANS), // Transfer control + .HSIZE(HSIZE), // Transfer size + .HWRITE(HWRITE), // Write control + .HWDATA(HWDATA), // Write data + .HREADY(HREADY), // Transfer phase done + .HREADYOUT(HREADYOUT), // Device ready + .HRDATA(HRDATA), // Read data output + .HRESP(HRESP), + + .SRAMRDATA(SRAMRDATA), + .SRAMADDR(SRAMADDR), + .SRAMWEN(SRAMWEN), + .SRAMWDATA(SRAMWDATA), + .SRAMCS(SRAMCS) +); + + +cmsdk_fpga_sram #(.AW(RAM_ADDR_W)) u_fpga_sram( + .CLK(HCLK), + .ADDR(SRAMADDR), + .WDATA(SRAMWDATA), + .WREN(SRAMWEN), + .CS(SRAMCS), + .RDATA(SRAMRDATA) +); + +// cmsdk_ahb_ram_beh #( +// .AW(21), +// .filename("app_flash.v8-a.hex"), +// .WS_N(0), +// .WS_S(0)) u_ahb_sram ( +// .HCLK(HCLK), // Clock +// .HRESETn(HRESETn), // Reset +// .HSEL(HSEL), // Device select +// .HADDR(HADDR), // Address +// .HTRANS(HTRANS), // Transfer control +// .HSIZE(HSIZE), // Transfer size +// .HWRITE(HWRITE), // Write control +// .HWDATA(HWDATA), // Write data +// .HREADY(HREADY), // Transfer phase done +// .HREADYOUT(HREADYOUT), // Device ready +// .HRDATA(HRDATA), // Read data output +// .HRESP(HRESP) +// ); +endmodule \ No newline at end of file diff --git a/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_cpu_ss.v b/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_cpu_ss.v new file mode 100644 index 0000000000000000000000000000000000000000..387800e6b113f7e85f9746a0d5a8dcb4045d38d4 --- /dev/null +++ b/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_cpu_ss.v @@ -0,0 +1,588 @@ +//----------------------------------------------------------------------------- +// MegaSoC CPU Subsystem +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. 
+// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Purpose: +// CPU subsystem combines CPU (currently only A53 although may be a wrapper for +// different CPU instantiations at some point), interrupt controller and debugger +//----------------------------------------------------------------------------- +// Modules instantiated: +// CortexA53_1 (u_cortexa53) +// DAPLITE (u_daplite) +// megasoc_irq_sync (u_megasoc_irq_sync) +// GIC400 (u_GIC400) + +module megasoc_cpu_ss #( + parameter NUM_GICRID_BITS = 12, + parameter NUM_GICWID_BITS = 12, + parameter NUM_SPIS = 480 + +)( + // Clock and Reset signals + input wire CPU_CLK, + input wire RESETn, + + // ACE Interface; Clock and Configuration Signals + input wire ACLKENM, + input wire ACINACTM, + output wire [ 7: 0] RDMEMATTR, + output wire [ 7: 0] WRMEMATTR, + // ACE Interface; Write Address Channel Signals + input wire AWREADYM, + output wire AWVALIDM, + output wire [ 5: 0] AWIDM, + output wire [ 43: 0] AWADDRM, + output wire [ 7: 0] AWLENM, + output wire [ 2: 0] AWSIZEM, + output wire [ 1: 0] AWBURSTM, + output wire AWLOCKM, + output wire [ 3: 0] AWCACHEM, + output wire [ 2: 0] AWPROTM, + // ACE Interface; Write Data Channel Signals + input wire WREADYM, + output wire WVALIDM, + output wire [ 5: 0] WIDM, + output wire [127: 0] WDATAM, + output wire [ 15: 0] WSTRBM, + output wire WLASTM, + // ACE Interface; Write Response Channel Signals + output wire BREADYM, + input wire BVALIDM, + input wire [ 5: 0] BIDM, + input wire [ 1: 0] BRESPM, + // ACE Interface; Read Address Channel Signals + input wire ARREADYM, + output wire ARVALIDM, + output wire [ 5: 0] ARIDM, + output wire [ 43: 0] ARADDRM, + output wire [ 7: 0] ARLENM, + output wire [ 2: 0] ARSIZEM, + output wire [ 1: 0] ARBURSTM, + output wire ARLOCKM, + output wire [ 3: 0] ARCACHEM, + output wire [ 2: 0] ARPROTM, + // ACE Interface; Read Data Channel Signals + output wire RREADYM, + input wire RVALIDM, + input wire [ 5: 0] RIDM, + input wire [127: 0] RDATAM, + input wire [ 1: 0] RRESPM, + input wire RLASTM, + // APB Interface Signals + input wire nPRESETDBG, + input wire PCLKENDBG, + input wire PSELDBG, + input wire [ 30: 2] PADDRDBG, + input wire PENABLEDBG, + input wire PWRITEDBG, + input wire [ 31: 0] PWDATADBG, + output wire [ 31: 0] PRDATADBG, + output wire PREADYDBG, + output wire PSLVERRDBG, + + // DAP-LITE external signals + input wire nTRST, + input wire SWCLKTCK, + input wire SWDITMS, + input wire TDI, + output wire TDO, + output wire nTDOEN, + output wire SWDO, + output wire SWDOEN, + + // GIC AXI interface signals + // AXI read address channel + input wire [NUM_GICRID_BITS-1:0] GIC_ARID, + input wire [14:0] GIC_ARADDR, + input wire [7:0] GIC_ARLEN, + input wire [2:0] GIC_ARSIZE, + input wire [1:0] GIC_ARBURST, + input wire [2:0] GIC_ARPROT, + input wire [2:0] GIC_ARUSER, + input wire GIC_ARVALID, + output wire GIC_ARREADY, + + // AXI read data channel + output wire [NUM_GICRID_BITS-1:0] GIC_RID, + output wire [31:0] GIC_RDATA, + output wire GIC_RLAST, + output wire [1:0] GIC_RRESP, + output wire GIC_RVALID, + input wire GIC_RREADY, + + // AXI write address channel + input wire [NUM_GICWID_BITS-1:0] GIC_AWID, + input wire [14:0] GIC_AWADDR, + input wire [7:0] GIC_AWLEN, + input wire [2:0] GIC_AWSIZE, + input wire [1:0] GIC_AWBURST, + input wire [2:0] GIC_AWPROT, + input wire [2:0] GIC_AWUSER, + input wire GIC_AWVALID, + 
output wire GIC_AWREADY, + + // AXI write data channel + input wire [31:0] GIC_WDATA, + input wire [3:0] GIC_WSTRB, + input wire GIC_WVALID, + output wire GIC_WREADY, + + // AXI write response channel + output wire [NUM_GICWID_BITS-1:0] GIC_BID, + output wire [1:0] GIC_BRESP, + output wire GIC_BVALID, + input wire GIC_BREADY, + + input wire [NUM_SPIS-1:0] IRQs + +); + + localparam NUM_CPUS = 1; + localparam NUM_EXP_SHD_INT = 64; + + wire [(NUM_CPUS-1):0] cfg_cfgend; + wire [(NUM_CPUS-1):0] cfg_aa64naa32; + wire [(NUM_CPUS-1):0] cfg_vinithi; + wire [(NUM_CPUS-1):0] cfg_cfgte; + wire [39:18] cfg_periphbase; + wire [7:0] cfg_clusterid; + + + assign cfg_cfgend = {NUM_CPUS{1'b0}}; + assign cfg_aa64naa32 = {NUM_CPUS{1'b1}}; + assign cfg_vinithi = {NUM_CPUS{1'b1}}; + assign cfg_cfgte = {NUM_CPUS{1'b1}}; + assign cfg_periphbase = 22'b0000000001000000000000; + assign cfg_clusterid = 8'h00; + + + wire [(NUM_CPUS-1):0] nFIQCPU; + wire [(NUM_CPUS-1):0] nIRQCPU; + wire [(NUM_CPUS-1):0] nVFIQCPU; + wire [(NUM_CPUS-1):0] nVIRQCPU; + wire [(NUM_CPUS-1):0] nFIQOUT; + wire [(NUM_CPUS-1):0] nIRQOUT; + + wire [(NUM_CPUS-1):0] nCNTPSIRQ; + wire [(NUM_CPUS-1):0] nCNTPNSIRQ; + wire [(NUM_CPUS-1):0] nCNTVIRQ; + wire [(NUM_CPUS-1):0] nCNTHPIRQ; + + wire [31:0] PRDATADBG_CPU; + wire PREADYDBG_CPU; + wire PSLVERRDBG_CPU; + wire [31:2] PADDRDBG_CPU; + wire PSELDBG_CPU; + wire PWRITEDBG_CPU; + wire PENABLEDBG_CPU; + wire [31:0] PWDATADBG_CPU; + + assign AWIDM[5] = 1'b0; + assign WIDM[5] = 1'b0; + assign BIDM[5] = 1'b0; + CortexA53_1 + u_cortexa53 + (// Clocks and resets + .CLKIN (CPU_CLK), + .nCPUPORESET ({NUM_CPUS{RESETn}}), + .nCORERESET ({NUM_CPUS{RESETn}}), + .nPRESETDBG (RESETn), + .nL2RESET (RESETn), + .nMBISTRESET (1'b1), // No MBIST + .L2RSTDISABLE (1'b0), + .WARMRSTREQ (), + + // Configuration signals + .CFGEND (cfg_cfgend), + .VINITHI (cfg_vinithi), + .CFGTE (cfg_cfgte), + .CP15SDISABLE ({NUM_CPUS{1'b0}}), + .CLUSTERIDAFF1 (cfg_clusterid), + .CLUSTERIDAFF2 (8'h00), + .AA64nAA32 (cfg_aa64naa32), + .RVBARADDR0 ({38{1'b0}}), + + // Interrupt signals + .nFIQ (nFIQCPU), + .nIRQ (nIRQCPU), + .nSEI ({NUM_CPUS{1'b1}}), + .nVFIQ (nVFIQCPU), + .nVIRQ (nVIRQCPU), + .nVSEI ({NUM_CPUS{1'b1}}), + .nREI ({NUM_CPUS{1'b1}}), + .nVCPUMNTIRQ (), + .PERIPHBASE (cfg_periphbase), + .GICCDISABLE (1'b1), + .ICDTVALID (1'b0), + .ICDTREADY (), + .ICDTDATA ({16{1'b0}}), + .ICDTLAST (1'b0), + .ICDTDEST (2'b00), + .ICCTVALID (), + .ICCTREADY (1'b0), + .ICCTDATA (), + .ICCTLAST (), + .ICCTID (), + + // Generic timer signals + .CNTVALUEB (64'd0), + .CNTCLKEN (1'b1), + .nCNTPNSIRQ (nCNTPNSIRQ), + .nCNTPSIRQ (nCNTPSIRQ), + .nCNTVIRQ (nCNTVIRQ), + .nCNTHPIRQ (nCNTHPIRQ), + + // Power management signals + .CLREXMONREQ (1'b0), + .CLREXMONACK (), + .EVENTI (1'b0), + .EVENTO (), + .STANDBYWFI (), + .STANDBYWFE (), + .STANDBYWFIL2 (), + .L2FLUSHREQ (1'b0), + .L2FLUSHDONE (), + .SMPEN (), + .CPUQACTIVE (), + .CPUQREQn ({NUM_CPUS{1'b1}}), + .CPUQDENY (), + .CPUQACCEPTn (), + .NEONQACTIVE (), + .NEONQREQn ({NUM_CPUS{1'b1}}), + .NEONQDENY (), + .NEONQACCEPTn (), + .L2QACTIVE (), + .L2QREQn (1'b1), + .L2QDENY (), + .L2QACCEPTn (), + + + // ACE/Skyros interface signals + // NB. the execution testbench bus model is a simple single master, + // single slave system and therefore cache maintenance operations are + // disabled. The bus model is a simple in-order device so barriers are + // also disabled in the ACE configuration. However, in a Skyros system + // SYSBARDISABLE must be HIGH, so the Skyros subsystem does process + // barriers. 
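    // Note on the static configuration above (illustrative annotation, assuming
    // the standard Cortex-A53 interpretation of these pins): the single set bit
    // of cfg_periphbase is PERIPHBASE[30], i.e. a peripheral base address of
    // 0x4000_0000, and with GICCDISABLE tied high the core's internal GIC CPU
    // interface is unused; interrupts instead come from the external GIC-400
    // instance (u_GIC400) below via nIRQCPU/nFIQCPU/nVIRQCPU/nVFIQCPU. The
    // tie-offs that follow keep cache-maintenance broadcast and barrier
    // forwarding disabled (BROADCAST* low, SYSBARDISABLE high), consistent
    // with a single-cluster system with no other coherent requesters.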
+ .nEXTERRIRQ (), + .BROADCASTCACHEMAINT (1'b0), + .BROADCASTINNER (1'b0), + .BROADCASTOUTER (1'b0), + .SYSBARDISABLE (1'b1), + + // ACE interface + // - Clock and configuration signals + .ACLKENM (1'b1), + .ACINACTM (1'b1), + .RDMEMATTR (), + .WRMEMATTR (), + // - Write address channel signals + .AWREADYM (AWREADYM), + .AWVALIDM (AWVALIDM), + .AWIDM (AWIDM[4:0]), + .AWADDRM (AWADDRM), + .AWLENM (AWLENM), + .AWSIZEM (AWSIZEM), + .AWBURSTM (AWBURSTM), + .AWBARM (), + .AWDOMAINM (), + + .AWLOCKM (AWLOCKM), + .AWCACHEM (AWCACHEM), + .AWPROTM (AWPROTM), + .AWSNOOPM (), + .AWUNIQUEM (), + // - Write data channel signals + .WREADYM (WREADYM), + .WVALIDM (WVALIDM), + .WIDM (WIDM[4:0]), + .WDATAM (WDATAM), + .WSTRBM (WSTRBM), + .WLASTM (WLASTM), + // - Write response channel signals + .BREADYM (BREADYM), + .BVALIDM (BVALIDM), + .BIDM (BIDM[4:0]), + .BRESPM (BRESPM), + // - Read address channel signals + .ARREADYM (ARREADYM), + .ARVALIDM (ARVALIDM), + .ARIDM (ARIDM), + .ARADDRM (ARADDRM), + .ARLENM (ARLENM), + .ARSIZEM (ARSIZEM), + .ARBURSTM (ARBURSTM), + .ARBARM (), + .ARDOMAINM (), + + .ARLOCKM (ARLOCKM), + .ARCACHEM (ARCACHEM), + .ARPROTM (ARPROTM), + .ARSNOOPM (), + // - Read data channel signals + .RREADYM (RREADYM), + .RVALIDM (RVALIDM), + .RIDM (RIDM), + .RDATAM (RDATAM), + .RRESPM ({2'b00, RRESPM}), + .RLASTM (RLASTM), + // - Coherency address channel signals + .ACREADYM (), + .ACVALIDM (1'b0), + .ACADDRM (44'h0), + .ACPROTM (3'b000), + .ACSNOOPM (4'h0), + // - Coherency response channel signals + .CRREADYM (1'b0), + .CRVALIDM (), + .CRRESPM (), + // - Coherency data channel signals + .CDREADYM (1'b0), + .CDVALIDM (), + .CDDATAM (), + .CDLASTM (), + // - Read/write acknowledge signals + .RACKM (), + .WACKM (), + + // Debug APB interface signals + .PCLKENDBG (1'b1), + .PSELDBG (PSELDBG_CPU), + .PADDRDBG (PADDRDBG_CPU[21:2]), + .PADDRDBG31 (PADDRDBG_CPU[31]), + .PENABLEDBG (PENABLEDBG_CPU), + .PWRITEDBG (PWRITEDBG_CPU), + .PWDATADBG (PWDATADBG_CPU), + .PRDATADBG (PRDATADBG_CPU), + .PREADYDBG (PREADYDBG_CPU), + .PSLVERRDBG (PSLVERRDBG_CPU), + + // Miscellaneous debug signals + .DBGROMADDR ({28{1'b0}}), + .DBGROMADDRV (1'b0), + .DBGACK (), + .nCOMMIRQ (), + .COMMRX (), + .COMMTX (), + .EDBGRQ ({NUM_CPUS{1'b0}}), + .DBGEN ({NUM_CPUS{1'b1}}), + .NIDEN ({NUM_CPUS{1'b1}}), + .SPIDEN ({NUM_CPUS{1'b1}}), + .SPNIDEN ({NUM_CPUS{1'b1}}), + .DBGRSTREQ (), + .DBGNOPWRDWN (), + .DBGPWRDUP ({NUM_CPUS{1'b1}}), + .DBGPWRUPREQ (), + .DBGL1RSTDISABLE (1'b0), + + // ATB interface signals + .ATCLKEN (1'b1), + .ATREADYM0 (1'b1), + .AFVALIDM0 (1'b0), + .ATDATAM0 (), + .ATVALIDM0 (), + .ATBYTESM0 (), + .AFREADYM0 (), + .ATIDM0 (), + + + // Miscellaneous ETM signals + .SYNCREQM0 (1'b0), + .TSVALUEB (64'd0), + + // CTI interface signals: + .CTICHIN (4'h0), + .CTICHOUTACK (4'h0), + .CTICHOUT (), + .CTICHINACK (), + .CISBYPASS (1'b1), + .CIHSBYPASS ({4{1'b1}}), + .CTIIRQ (), + .CTIIRQACK ({NUM_CPUS{1'b1}}), + + // PMU signals + .nPMUIRQ (), + .PMUEVENT0 (), + + // DFT signals + .DFTSE (1'b0), + .DFTRSTDISABLE (1'b0), + .DFTRAMHOLD (1'b0), + .DFTMCPHOLD (1'b0), + + // MBIST interface signals + .MBISTREQ (1'b0) + ); + + +DAPLITE u_daplite( +// External power-on reset + + .nPOTRST(RESETn), // Power-on reset + +// External JTAG/SW connections to SWJDP-DP + + .nTRST(nTRST), // TAP Reset (Asynchronous) + .SWCLKTCK(SWCLKTCK), // TAP Clock + .SWDITMS(SWDITMS), // TAP Mode/ SW Data In + .TDI(TDI), // JTAG TAP Data In . 
+ .TDO(TDO), // Asynchronous JTAG TAP Data Out + .nTDOEN(nTDOEN), // Asynchronous JTAG TAP Data Out Enable + .SWDO(SWDO), // SW Data Out + .SWDOEN(SWDOEN), // SW Data Out Enable + +// SWJ-DP Status + .JTAGNSW(), // Current TAP Mode of operation + .JTAGTOP(), // JTAG TAP controller in one of top 4 states; + // i.e. TLR, RTI, Sel-DR or Sel-IR + +// SWJ-DP power and reset controller interface + + .CDBGPWRUPACK(1'b1), // Debug Power Domain power-up acknowledge + .CSYSPWRUPACK(1'b1), // System Power Domain power-up acknowledge + .CDBGRSTACK(1'b1), // Debug reset acknowledge from reset controller + .CDBGPWRUPREQ(), // Debug Power Domain power-up request + .CSYSPWRUPREQ(), // System Power Domain power-up request + .CDBGRSTREQ(), // Debug reset request to reset controller + +// Power Domain controls + + .nCDBGPWRDN(1'b1), // Debug infrastructure power-down control + .nCSOCPWRDN(1'b1), // External system (SOC domain) + // power-down control + + // APB-AP device enable input + .DEVICEEN(1'b1), // Device enable + + // Software Access Enable + .DBGSWENABLE(), // Provided to block/grant software access to + // things other than the APB-Mux + + // APB-MUX port connections + // System APB port + + // System Clock / Reset Pins + .PCLKSYS(CPU_CLK), // System APB clock (typically HCLK) + .PCLKENSYS(1'b1), // Enable term for PCLKSYS domain + .PRESETSYSn(RESETn), // Resets the APB interface connected to + // the system bus + + // System Slave port (driven by system APB) + .PADDRSYS(PADDRDBG), // System APB address bus + .PSELSYS(PSELDBG), // System APB select + .PWRITESYS(PWRITEDBG), // System APB write access + .PENABLESYS(PENABLEDBG), // System APB enable signal - indicates second + // and subsequent cycles of an APB transfer + .PWDATASYS(PWDATADBG), // System APB Write data bus + .PRDATASYS(PRDATADBG), // System APB write data bus + .PREADYSYS(PREADYDBG), // System APB Ready signal + .PSLVERRSYS(PSLVERRDBG), // System APB transfer error signal + + //Debug APB port + + .PCLKDBG(CPU_CLK), // Debug APB clock + .PCLKENDBG(1'b1), // Enable term for PCLKDBG domain + .PRESETDBGn(RESETn), // Reset for arbitration logic, + // APB-AP slave port and + // Debug-APB master port + .PRDATADBG(PRDATADBG_CPU), // CoreSight Peripheral Read data bus + .PREADYDBG(PREADYDBG_CPU), // Debug-APB Ready signal + .PSLVERRDBG(PSLVERRDBG_CPU), // Debug-APB transfer error signal + .PADDRDBG(PADDRDBG_CPU), // Debug-APB address bus + .PSELDBG(PSELDBG_CPU), // Debug-APB select + .PWRITEDBG(PWRITEDBG_CPU), // Debug-APB write access + .PENABLEDBG(PENABLEDBG_CPU), // Debug-APB enable signal + .PWDATADBG(PWDATADBG_CPU), // Debug-APB write data bus + +// Scan enable connection + + .SE(1'b0) // Scan Enable + +); + +wire [NUM_SPIS-1:0] IRQs_ss; + +megasoc_irq_sync #(.NUM_SPIS(NUM_SPIS)) u_megasoc_irq_sync( + .CLK(CPU_CLK), + .RESETn(RESETn), + .SPI_i(IRQs), + .SPI_o(IRQs_ss) +); + + +GIC400 #( + .NUM_CPUS(NUM_CPUS), + .NUM_SPIS(NUM_SPIS), + .NUM_WID_BITS(NUM_GICWID_BITS), + .NUM_RID_BITS(NUM_GICRID_BITS) +) u_GIC400( + .CLK(CPU_CLK), + .nRESET(RESETn), + .DFTRSTDISABLE(1'b0), + .DFTSE(1'b0), + + .CFGSDISABLE(1'b0), + + .ARID(GIC_ARID), + .ARADDR(GIC_ARADDR), + .ARLEN(GIC_ARLEN), + .ARSIZE(GIC_ARSIZE), + .ARBURST(GIC_ARBURST), + .ARPROT(GIC_ARPROT), + .ARUSER(GIC_ARUSER), + .ARVALID(GIC_ARVALID), + .ARREADY(GIC_ARREADY), + + .RID(GIC_RID), + .RDATA(GIC_RDATA), + .RLAST(GIC_RLAST), + .RRESP(GIC_RRESP), + .RVALID(GIC_RVALID), + .RREADY(GIC_RREADY), + + .AWID(GIC_AWID), + .AWADDR(GIC_AWADDR), + .AWLEN(GIC_AWLEN), + .AWSIZE(GIC_AWSIZE), + 
.AWBURST(GIC_AWBURST), + .AWPROT(GIC_AWPROT), + .AWUSER(GIC_AWUSER), + .AWVALID(GIC_AWVALID), + .AWREADY(GIC_AWREADY), + + .WDATA(GIC_WDATA), + .WSTRB(GIC_WSTRB), + .WVALID(GIC_WVALID), + .WREADY(GIC_WREADY), + + .BID(GIC_BID), + .BRESP(GIC_BRESP), + .BVALID(GIC_BVALID), + .BREADY(GIC_BREADY), + + .IRQS(IRQs_ss), + + .nLEGACYFIQ(1'b1), + .nLEGACYIRQ(1'b1), + .nCNTPSIRQ(nCNTPSIRQ), + .nCNTPNSIRQ(nCNTPNSIRQ), + .nCNTVIRQ(nCNTVIRQ), + .nCNTHPIRQ(nCNTHPIRQ), + + .nIRQCPU(nIRQCPU), + .nFIQCPU(nFIQCPU), + .nVIRQCPU(nVIRQCPU), + .nVFIQCPU(nVFIQCPU), + + .nIRQOUT(nIRQOUT), + .nFIQOUT(nFIQOUT) +); + + +endmodule \ No newline at end of file diff --git a/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_irq_sync.v b/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_irq_sync.v new file mode 100644 index 0000000000000000000000000000000000000000..7729c3748391f0557485ae2c8136eea17bff0e00 --- /dev/null +++ b/logical/megasoc_subsystems/megasoc_cpu_subsystem/megasoc_irq_sync.v @@ -0,0 +1,34 @@ +//----------------------------------------------------------------------------- +// MegaSoC Interrupt synchronizer +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. +// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Purpose: +// Synchronize interrupts to input clock. Needed as GIC has no internal synchronizers +//----------------------------------------------------------------------------- +// Modules instantiated: +// none + + +module megasoc_irq_sync #( + parameter NUM_SPIS=240 + )( + input wire CLK, + input wire RESETn, + input wire [NUM_SPIS-1:0] SPI_i, + output reg [NUM_SPIS-1:0] SPI_o +); + +always @(posedge CLK or negedge RESETn) begin + if(~RESETn) begin + SPI_o<={NUM_SPIS{1'b0}}; + end else begin + SPI_o<=SPI_i; + end +end +endmodule \ No newline at end of file diff --git a/logical/megasoc_subsystems/peripheral_subsystem/megasoc_peripheral_subsystem.v b/logical/megasoc_subsystems/peripheral_subsystem/megasoc_peripheral_subsystem.v new file mode 100644 index 0000000000000000000000000000000000000000..b038d88ae8caff7252106e513c62a0e6221978ae --- /dev/null +++ b/logical/megasoc_subsystems/peripheral_subsystem/megasoc_peripheral_subsystem.v @@ -0,0 +1,227 @@ +//----------------------------------------------------------------------------- +// MegaSoC Peripheral Subsystem +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. +// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Purpose: +// APB peripheral subsystem for SoC. 
Takes an APB input (subordinate port) and +// muxes to several peripherals including UART and timers +//----------------------------------------------------------------------------- +// Modules instantiated: +// cmsdk_apb_slave_mux (u_apb_slave_mux) +// cmsdk_apb_uart (u_apb_uart_0) +// cmsdk_apb_timer (u_apb_timer0) + +module megasoc_peripheral_subsystem( + input wire PCLK, + input wire PRESETn, + + // APB bus interface + input wire [31:0] PADDR, + input wire PENABLE, + input wire PWRITE, + input wire [31:0] PWDATA, + input wire PSEL, + output wire [31:0] PRDATA, + output wire PREADY, + output wire PSLVERR, + + input wire UARTRXD, + output wire UARTTXD, + output wire UARTTXEN, + + output wire [5:0] PERI_IRQS // Peripheral interrupts to GIC +); + +wire UARTCLK; +assign UARTCLK=PCLK; // TODO generate UARTCLK from elsewhere + +// Internal APB signals for UART0 +wire PSEL_UART0; +wire PREADY_UART0; +wire [31:0] PRDATA_UART0; +wire PSLVERR_UART0; + +// Internal APB signals for Timer0 +wire PSEL_TIMER0; +wire PREADY_TIMER0; +wire [31:0] PRDATA_TIMER0; +wire PSLVERR_TIMER0; + +// CMSDK APB Slave Mux (from Corstone 101) +cmsdk_apb_slave_mux #( + .PORT0_ENABLE(1), + .PORT1_ENABLE(1), + .PORT2_ENABLE(0), + .PORT3_ENABLE(0), + .PORT4_ENABLE(0), + .PORT5_ENABLE(0), + .PORT6_ENABLE(0), + .PORT7_ENABLE(0), + .PORT8_ENABLE(0), + .PORT9_ENABLE(0), + .PORT10_ENABLE(0), + .PORT11_ENABLE(0), + .PORT12_ENABLE(0), + .PORT13_ENABLE(0), + .PORT14_ENABLE(0), + .PORT15_ENABLE(0) + ) u_apb_slave_mux ( + .DECODE4BIT(PADDR[15:12]), + .PSEL(PSEL), + + .PSEL0(PSEL_UART0), + .PREADY0(PREADY_UART0), + .PRDATA0(PRDATA_UART0), + .PSLVERR0(PSLVERR_UART0), + + .PSEL1(PSEL_TIMER0), + .PREADY1(PREADY_TIMER0), + .PRDATA1(PRDATA_TIMER0), + .PSLVERR1(PSLVERR_TIMER0), + + .PSEL2(), + .PREADY2(1'b0), + .PRDATA2(32'd0), + .PSLVERR2(1'b0), + + .PSEL3(), + .PREADY3(1'b0), + .PRDATA3(32'd0), + .PSLVERR3(1'b0), + + .PSEL4(), + .PREADY4(1'b0), + .PRDATA4(32'd0), + .PSLVERR4(1'b0), + + .PSEL5(), + .PREADY5(1'b0), + .PRDATA5(32'd0), + .PSLVERR5(1'b0), + + .PSEL6(), + .PREADY6(1'b0), + .PRDATA6(32'd0), + .PSLVERR6(1'b0), + + .PSEL7(), + .PREADY7(1'b0), + .PRDATA7(32'd0), + .PSLVERR7(1'b0), + + .PSEL8(), + .PREADY8(1'b0), + .PRDATA8(32'd0), + .PSLVERR8(1'b0), + + .PSEL9(), + .PREADY9(1'b0), + .PRDATA9(32'd0), + .PSLVERR9(1'b0), + + .PSEL10(), + .PREADY10(1'b0), + .PRDATA10(32'd0), + .PSLVERR10(1'b0), + + .PSEL11(), + .PREADY11(1'b0), + .PRDATA11(32'd0), + .PSLVERR11(1'b0), + + .PSEL12(), + .PREADY12(1'b0), + .PRDATA12(32'd0), + .PSLVERR12(1'b0), + + .PSEL13(), + .PREADY13(1'b0), + .PRDATA13(32'd0), + .PSLVERR13(1'b0), + + .PSEL14(), + .PREADY14(1'b0), + .PRDATA14(32'd0), + .PSLVERR14(1'b0), + + .PSEL15(), + .PREADY15(1'b0), + .PRDATA15(32'd0), + .PSLVERR15(1'b0), + + .PREADY(PREADY), + .PRDATA(PRDATA), + .PSLVERR(PSLVERR) +); + +wire uart0_txint; +wire uart0_rxint; +wire uart0_txovrint; +wire uart0_rxovrint; +wire uart0_combined_int; + +cmsdk_apb_uart u_apb_uart_0( + .PCLK (PCLK), // Peripheral clock + .PCLKG (PCLK), // Gated PCLK for bus + .PRESETn (PRESETn), // Reset + + .PSEL (PSEL_UART0), // APB interface inputs + .PADDR (PADDR[11:2]), + .PENABLE (PENABLE), + .PWRITE (PWRITE), + .PWDATA (PWDATA), + + .PRDATA (PRDATA_UART0), // APB interface outputs + .PREADY (PREADY_UART0), + .PSLVERR (PSLVERR_UART0), + + .ECOREVNUM (4'h0),// Engineering-change-order revision bits + + .RXD (UARTRXD), // Receive data + + .TXD (UARTTXD), // Transmit data + .TXEN (UARTTXEN), // Transmit Enabled + + .BAUDTICK (), // Baud rate x16 tick output (for testing) + 
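    // Note (annotation, assuming the usual GIC-400 SPI numbering from ID 32):
    // the interrupt outputs wired up below land on PERI_IRQS[4:0], with the
    // timer on PERI_IRQS[5]; the top-level wrapper zero-extends PERI_IRQS onto
    // the GIC-400 IRQS input, so they arrive as:
    //   PERI_IRQS[0..4] : UART0 TX / RX / TX overrun / RX overrun / combined -> SPI 32..36
    //   PERI_IRQS[5]    : Timer0                                             -> SPI 37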
+ .TXINT (uart0_txint), // Transmit Interrupt + .RXINT (uart0_rxint), // Receive Interrupt + .TXOVRINT (uart0_txovrint), // Transmit Overrun Interrupt + .RXOVRINT (uart0_rxovrint), // Receive Overrun Interrupt + .UARTINT (uart0_combined_int) // Combined Interrupt +); + +assign PERI_IRQS[0] = uart0_txint; +assign PERI_IRQS[1] = uart0_rxint; +assign PERI_IRQS[2] = uart0_txovrint; +assign PERI_IRQS[3] = uart0_rxovrint; +assign PERI_IRQS[4] = uart0_combined_int; + +wire timer0_int; + +cmsdk_apb_timer u_apb_timer0( + .PCLK(PCLK), // PCLK for timer operation + .PCLKG(PCLK), // Gated clock + .PRESETn(PRESETn), // Reset + .PSEL(PSEL_TIMER0), // Device select + .PADDR(PADDR[11:2]), // Address + .PENABLE(PENABLE), // Transfer control + .PWRITE(PWRITE), // Write control + .PWDATA(PWDATA), // Write data + .ECOREVNUM(4'h0),// Engineering-change-order revision bits + .PRDATA(PRDATA_TIMER0), // Read data + .PREADY(PREADY_TIMER0), // Device ready + .PSLVERR(PSLVERR_TIMER0), // Device error response + .EXTIN(1'b1), // Extenal input + .TIMERINT(timer0_int) +); + +assign PERI_IRQS[5] = timer0_int; + +endmodule \ No newline at end of file diff --git a/logical/sl_ahb_qspi b/logical/sl_ahb_qspi new file mode 160000 index 0000000000000000000000000000000000000000..88b0cf65a75e05d52ef742745cce1c903753f7fd --- /dev/null +++ b/logical/sl_ahb_qspi @@ -0,0 +1 @@ +Subproject commit 88b0cf65a75e05d52ef742745cce1c903753f7fd diff --git a/logical/top_megasoc_tech/megasoc_tech_system_wrapper.v b/logical/top_megasoc_tech/megasoc_tech_system_wrapper.v new file mode 100644 index 0000000000000000000000000000000000000000..f03dbf60d15b18f2f35bc4e0de46175aa6c59c9a --- /dev/null +++ b/logical/top_megasoc_tech/megasoc_tech_system_wrapper.v @@ -0,0 +1,28 @@ +//----------------------------------------------------------------------------- +// MegaSoC Tech System Wrapper +// A joint work commissioned on behalf of SoC Labs, under Arm Academic Access license. +// +// Contributors +// +// Daniel Newbrook (d.newbrook@soton.ac.uk) +// +// Copyright � 2021-4, SoC Labs (www.soclabs.org) +//----------------------------------------------------------------------------- +// Purpose: +// Top level wrapper for the megasoc "System" subsystem. This subsystem includes +// the filesystem and similar peripherals (ethernet/PCIe). Uses DMA to move from +// these peripherals to DRAM +//----------------------------------------------------------------------------- +// Modules instantiated: +// +//----------------------------------------------------------------------------- +// To Do +// - Everything + +module megasoc_tech_system_wrapper( + +); + + + +endmodule \ No newline at end of file diff --git a/logical/top_megasoc_tech/megasoc_tech_wrapper.v b/logical/top_megasoc_tech/megasoc_tech_wrapper.v index 7799519add12261b319b349c657d54b92dc54e1e..93349acdaea2f73bd573ac956a9dea298af0174c 100644 --- a/logical/top_megasoc_tech/megasoc_tech_wrapper.v +++ b/logical/top_megasoc_tech/megasoc_tech_wrapper.v @@ -8,7 +8,22 @@ // // Copyright � 2021-4, SoC Labs (www.soclabs.org) //----------------------------------------------------------------------------- +// Purpose: +// Top level wrapper for the megasoc CPU subsystem. 
+//----------------------------------------------------------------------------- // Modules instantiated: +// megasoc_cpu_ss (u_megasoc_cpu_ss) +// nic400_megasoc_main (u_nic400_megasoc_main) +// ROM_wrapper (u_ROM_wrapper) +// sl_ahb_sram (u_sl_ahb_sram) +// SRAM_wrapper (u_SRAM_wrapper) +// megasoc_peripheral_subsystem (u_megasoc_peripheral_subsystem) +// +//----------------------------------------------------------------------------- +// To Do +// - Replace sl_ahb_sram with QSPI controller to use external flash +// - Add DDR controller for DRAM port on NIC400 + `timescale 1ns/1ps @@ -90,8 +105,785 @@ module megasoc_tech_wrapper( output wire [1:0] AXI_EXP_SYS_rresp, output wire AXI_EXP_SYS_rlast, output wire AXI_EXP_SYS_rvalid, - input wire AXI_EXP_SYS_rready + input wire AXI_EXP_SYS_rready, + + // QSPI Signals + output wire QSPI_SCLK, + output wire QSPI_nCS, + output wire [3:0] QSPI_IO_o, + input wire [3:0] QSPI_IO_i, + output wire [3:0] QSPI_IO_e, + + // UART signals + input wire UARTRXD, + output wire UARTTXD, + output wire UARTTXEN, + + // DAP-LITE external signals + input wire nTRST, + input wire SWCLKTCK, + input wire SWDITMS, + input wire TDI, + output wire TDO, + output wire nTDOEN, + output wire SWDO, + output wire SWDOEN +); + + +parameter ID_W=6; +parameter NUM_SPIS=480; + +wire CPU_AWREADYM; +wire CPU_AWVALIDM; +wire [ 5: 0] CPU_AWIDM; +wire [ 43: 0] CPU_AWADDRM; +wire [ 7: 0] CPU_AWLENM; +wire [ 2: 0] CPU_AWSIZEM; +wire [ 1: 0] CPU_AWBURSTM; +wire CPU_AWLOCKM; +wire [ 3: 0] CPU_AWCACHEM; +wire [ 2: 0] CPU_AWPROTM; +wire CPU_WREADYM; +wire CPU_WVALIDM; +wire [ 5: 0] CPU_WIDM; +wire [127: 0] CPU_WDATAM; +wire [ 15: 0] CPU_WSTRBM; +wire CPU_WLASTM; +wire CPU_BREADYM; +wire CPU_BVALIDM; +wire [ 4: 0] CPU_BIDM; +wire [ 1: 0] CPU_BRESPM; +wire CPU_ARREADYM; +wire CPU_ARVALIDM; +wire [ 5: 0] CPU_ARIDM; +wire [ 43: 0] CPU_ARADDRM; +wire [ 7: 0] CPU_ARLENM; +wire [ 2: 0] CPU_ARSIZEM; +wire [ 1: 0] CPU_ARBURSTM; +wire CPU_ARLOCKM; +wire [ 3: 0] CPU_ARCACHEM; +wire [ 2: 0] CPU_ARPROTM; +wire CPU_RREADYM; +wire CPU_RVALIDM; +wire [ 5: 0] CPU_RIDM; +wire [127: 0] CPU_RDATAM; +wire [ 1: 0] CPU_RRESPM; +wire CPU_RLASTM; + +wire [ID_W-1:0] GIC_ARID; +wire [14:0] GIC_ARADDR; +wire [7:0] GIC_ARLEN; +wire [2:0] GIC_ARSIZE; +wire [1:0] GIC_ARBURST; +wire [2:0] GIC_ARPROT; +wire [2:0] GIC_ARUSER; +wire GIC_ARVALID; +wire GIC_ARREADY; +wire [ID_W-1:0] GIC_RID; +wire [31:0] GIC_RDATA; +wire GIC_RLAST; +wire [1:0] GIC_RRESP; +wire GIC_RVALID; +wire GIC_RREADY; +wire [ID_W-1:0] GIC_AWID; +wire [14:0] GIC_AWADDR; +wire [7:0] GIC_AWLEN; +wire [2:0] GIC_AWSIZE; +wire [1:0] GIC_AWBURST; +wire [2:0] GIC_AWPROT; +wire [2:0] GIC_AWUSER; +wire GIC_AWVALID; +wire GIC_AWREADY; +wire [31:0] GIC_WDATA; +wire [3:0] GIC_WSTRB; +wire GIC_WVALID; +wire GIC_WREADY; +wire [ID_W-1:0] GIC_BID; +wire [1:0] GIC_BRESP; +wire GIC_BVALID; +wire GIC_BREADY; + +assign GIC_ARUSER=3'h0; +assign GIC_AWUSER=3'h0; + + +wire [ID_W-1:0] AWID_DRAM; +wire [31:0] AWADDR_DRAM; +wire [7:0] AWLEN_DRAM; +wire [2:0] AWSIZE_DRAM; +wire [1:0] AWBURST_DRAM; +wire AWLOCK_DRAM; +wire [3:0] AWCACHE_DRAM; +wire [2:0] AWPROT_DRAM; +wire AWVALID_DRAM; +wire AWREADY_DRAM; +wire [63:0] WDATA_DRAM; +wire [7:0] WSTRB_DRAM; +wire WLAST_DRAM; +wire WVALID_DRAM; +wire WREADY_DRAM; +wire [ID_W-1:0] BID_DRAM; +wire [1:0] BRESP_DRAM; +wire BVALID_DRAM; +wire BREADY_DRAM; +wire [ID_W-1:0] ARID_DRAM; +wire [31:0] ARADDR_DRAM; +wire [7:0] ARLEN_DRAM; +wire [2:0] ARSIZE_DRAM; +wire [1:0] ARBURST_DRAM; +wire ARLOCK_DRAM; +wire [3:0] ARCACHE_DRAM; +wire [2:0] 
ARPROT_DRAM; +wire ARVALID_DRAM; +wire ARREADY_DRAM; +wire [ID_W-1:0] RID_DRAM; +wire [63:0] RDATA_DRAM; +wire [1:0] RRESP_DRAM; +wire RLAST_DRAM; +wire RVALID_DRAM; +wire RREADY_DRAM; + +wire [31:0] HADDR_FLASH; +wire [1:0] HTRANS_FLASH; +wire HWRITE_FLASH; +wire [2:0] HSIZE_FLASH; +wire [2:0] HBURST_FLASH; +wire [3:0] HPROT_FLASH; +wire [31:0] HWDATA_FLASH; +wire [31:0] HRDATA_FLASH; +wire HREADY_FLASH; +wire HRESP_FLASH; + +wire [31:0] PADDR_PERIPHERAL; +wire [31:0] PWDATA_PERIPHERAL; +wire PWRITE_PERIPHERAL; +wire PENABLE_PERIPHERAL; +wire PSELx_PERIPHERAL; +wire [31:0] PRDATA_PERIPHERAL; +wire PSLVERR_PERIPHERAL; +wire PREADY_PERIPHERAL; + +wire [ID_W-1:0] AWID_RAM; +wire [31:0] AWADDR_RAM; +wire [7:0] AWLEN_RAM; +wire [2:0] AWSIZE_RAM; +wire [1:0] AWBURST_RAM; +wire AWLOCK_RAM; +wire [3:0] AWCACHE_RAM; +wire [2:0] AWPROT_RAM; +wire AWVALID_RAM; +wire AWREADY_RAM; +wire [63:0] WDATA_RAM; +wire [7:0] WSTRB_RAM; +wire WLAST_RAM; +wire WVALID_RAM; +wire WREADY_RAM; +wire [ID_W-1:0] BID_RAM; +wire [1:0] BRESP_RAM; +wire BVALID_RAM; +wire BREADY_RAM; +wire [ID_W-1:0] ARID_RAM; +wire [31:0] ARADDR_RAM; +wire [7:0] ARLEN_RAM; +wire [2:0] ARSIZE_RAM; +wire [1:0] ARBURST_RAM; +wire ARLOCK_RAM; +wire [3:0] ARCACHE_RAM; +wire [2:0] ARPROT_RAM; +wire ARVALID_RAM; +wire ARREADY_RAM; +wire [ID_W-1:0] RID_RAM; +wire [63:0] RDATA_RAM; +wire [1:0] RRESP_RAM; +wire RLAST_RAM; +wire RVALID_RAM; +wire RREADY_RAM; + +wire [ID_W-1:0] AWID_ROM; +wire [31:0] AWADDR_ROM; +wire [7:0] AWLEN_ROM; +wire [2:0] AWSIZE_ROM; +wire [1:0] AWBURST_ROM; +wire AWLOCK_ROM; +wire [3:0] AWCACHE_ROM; +wire [2:0] AWPROT_ROM; +wire AWVALID_ROM; +wire AWREADY_ROM; +wire [63:0] WDATA_ROM; +wire [7:0] WSTRB_ROM; +wire WLAST_ROM; +wire WVALID_ROM; +wire WREADY_ROM; +wire [ID_W-1:0] BID_ROM; +wire [1:0] BRESP_ROM; +wire BVALID_ROM; +wire BREADY_ROM; +wire [ID_W-1:0] ARID_ROM; +wire [31:0] ARADDR_ROM; +wire [7:0] ARLEN_ROM; +wire [2:0] ARSIZE_ROM; +wire [1:0] ARBURST_ROM; +wire ARLOCK_ROM; +wire [3:0] ARCACHE_ROM; +wire [2:0] ARPROT_ROM; +wire ARVALID_ROM; +wire ARREADY_ROM; +wire [ID_W-1:0] RID_ROM; +wire [63:0] RDATA_ROM; +wire [1:0] RRESP_ROM; +wire RLAST_ROM; +wire RVALID_ROM; +wire RREADY_ROM; + +wire [31:0] PADDR_FLASH_CTRL; +wire [31:0] PWDATA_FLASH_CTRL; +wire PWRITE_FLASH_CTRL; +wire [2:0] PPROT_FLASH_CTRL; +wire [3:0] PSTRB_FLASH_CTRL; +wire PENABLE_FLASH_CTRL; +wire PSELx_FLASH_CTRL; +wire [31:0] PRDATA_FLASH_CTRL; +wire PSLVERR_FLASH_CTRL; +wire PREADY_FLASH_CTRL; + +wire CPU_nPRESETDBG; +wire CPU_PCLKENDBG; +wire CPU_PSELDBG; +wire [ 31: 0] CPU_PADDRDBG; +wire CPU_PADDRDBG31; +wire CPU_PENABLEDBG; +wire CPU_PWRITEDBG; +wire [ 31: 0] CPU_PWDATADBG; +wire [ 31: 0] CPU_PRDATADBG; +wire CPU_PREADYDBG; +wire CPU_PSLVERRDBG; + +assign CPU_nPRESETDBG = SYS_RESETn; +assign CPU_PCLKENDBG = 1'b1; +assign CPU_PADDRDBG31 = 1'b0; + +wire [(NUM_SPIS-1):0] CPU_IRQS; +wire [5:0] PERI_IRQS; + +assign CPU_IRQS={{(NUM_SPIS-38){1'b0}}, PERI_IRQS}; + +megasoc_cpu_ss #( + .NUM_GICRID_BITS(ID_W), + .NUM_GICWID_BITS(ID_W), + .NUM_SPIS(NUM_SPIS) + ) u_megasoc_cpu_ss( + .CPU_CLK(SYS_CLK), + .RESETn(SYS_RESETn), + + .ACLKENM(1'b1), + .ACINACTM(), + .RDMEMATTR(), + .WRMEMATTR(), + + .AWREADYM(CPU_AWREADYM), + .AWVALIDM(CPU_AWVALIDM), + .AWIDM(CPU_AWIDM), + .AWADDRM(CPU_AWADDRM), + .AWLENM(CPU_AWLENM), + .AWSIZEM(CPU_AWSIZEM), + .AWBURSTM(CPU_AWBURSTM), + .AWLOCKM(CPU_AWLOCKM), + .AWCACHEM(CPU_AWCACHEM), + .AWPROTM(CPU_AWPROTM), + + .WREADYM(CPU_WREADYM), + .WVALIDM(CPU_WVALIDM), + .WIDM(CPU_WIDM), + .WDATAM(CPU_WDATAM), + .WSTRBM(CPU_WSTRBM), + 
.WLASTM(CPU_WLASTM), + + .BREADYM(CPU_BREADYM), + .BVALIDM(CPU_BVALIDM), + .BIDM(CPU_BIDM), + .BRESPM(CPU_BRESPM), + + .ARREADYM(CPU_ARREADYM), + .ARVALIDM(CPU_ARVALIDM), + .ARIDM(CPU_ARIDM), + .ARADDRM(CPU_ARADDRM), + .ARLENM(CPU_ARLENM), + .ARSIZEM(CPU_ARSIZEM), + .ARBURSTM(CPU_ARBURSTM), + .ARLOCKM(CPU_ARLOCKM), + .ARCACHEM(CPU_ARCACHEM), + .ARPROTM(CPU_ARPROTM), + + .RREADYM(CPU_RREADYM), + .RVALIDM(CPU_RVALIDM), + .RIDM(CPU_RIDM), + .RDATAM(CPU_RDATAM), + .RRESPM(CPU_RRESPM), + .RLASTM(CPU_RLASTM), + + .nPRESETDBG(CPU_nPRESETDBG), + .PCLKENDBG(CPU_PCLKENDBG), + .PSELDBG(CPU_PSELDBG), + .PADDRDBG(CPU_PADDRDBG[30:2]), + .PENABLEDBG(CPU_PENABLEDBG), + .PWRITEDBG(CPU_PWRITEDBG), + .PWDATADBG(CPU_PWDATADBG), + .PRDATADBG(CPU_PRDATADBG), + .PREADYDBG(CPU_PREADYDBG), + .PSLVERRDBG(CPU_PSLVERRDBG), + + .nTRST(nTRST), + .SWCLKTCK(SWCLKTCK), + .SWDITMS(SWDITMS), + .TDI(TDI), + .TDO(TDO), + .nTDOEN(nTDOEN), + .SWDO(SWDO), + .SWDOEN(SWDOEN), + + .GIC_ARID(GIC_ARID), + .GIC_ARADDR(GIC_ARADDR), + .GIC_ARLEN(GIC_ARLEN), + .GIC_ARSIZE(GIC_ARSIZE), + .GIC_ARBURST(GIC_ARBURST), + .GIC_ARPROT(GIC_ARPROT), + .GIC_ARUSER(GIC_ARUSER), + .GIC_ARVALID(GIC_ARVALID), + .GIC_ARREADY(GIC_ARREADY), + + .GIC_RID(GIC_RID), + .GIC_RDATA(GIC_RDATA), + .GIC_RLAST(GIC_RLAST), + .GIC_RRESP(GIC_RRESP), + .GIC_RVALID(GIC_RVALID), + .GIC_RREADY(GIC_RREADY), + + .GIC_AWID(GIC_AWID), + .GIC_AWADDR(GIC_AWADDR), + .GIC_AWLEN(GIC_AWLEN), + .GIC_AWSIZE(GIC_AWSIZE), + .GIC_AWBURST(GIC_AWBURST), + .GIC_AWPROT(GIC_AWPROT), + .GIC_AWUSER(GIC_AWUSER), + .GIC_AWVALID(GIC_AWVALID), + .GIC_AWREADY(GIC_AWREADY), + + .GIC_WDATA(GIC_WDATA), + .GIC_WSTRB(GIC_WSTRB), + .GIC_WVALID(GIC_WVALID), + .GIC_WREADY(GIC_WREADY), + + .GIC_BID(GIC_BID), + .GIC_BRESP(GIC_BRESP), + .GIC_BVALID(GIC_BVALID), + .GIC_BREADY(GIC_BREADY), + + .IRQs(CPU_IRQS) +); + + +nic400_megasoc_main u_nic400_megasoc_main( + .AWID_DRAM(), + .AWADDR_DRAM(), + .AWLEN_DRAM(), + .AWSIZE_DRAM(), + .AWBURST_DRAM(), + .AWLOCK_DRAM(), + .AWCACHE_DRAM(), + .AWPROT_DRAM(), + .AWVALID_DRAM(), + .AWREADY_DRAM(), + .WDATA_DRAM(), + .WSTRB_DRAM(), + .WLAST_DRAM(), + .WVALID_DRAM(), + .WREADY_DRAM(), + .BID_DRAM(), + .BRESP_DRAM(), + .BVALID_DRAM(), + .BREADY_DRAM(), + .ARID_DRAM(), + .ARADDR_DRAM(), + .ARLEN_DRAM(), + .ARSIZE_DRAM(), + .ARBURST_DRAM(), + .ARLOCK_DRAM(), + .ARCACHE_DRAM(), + .ARPROT_DRAM(), + .ARVALID_DRAM(), + .ARREADY_DRAM(), + .RID_DRAM(), + .RDATA_DRAM(), + .RRESP_DRAM(), + .RLAST_DRAM(), + .RVALID_DRAM(), + .RREADY_DRAM(), + + .HADDR_FLASH(HADDR_FLASH), + .HTRANS_FLASH(HTRANS_FLASH), + .HWRITE_FLASH(HWRITE_FLASH), + .HSIZE_FLASH(HSIZE_FLASH), + .HBURST_FLASH(HBURST_FLASH), + .HPROT_FLASH(HPROT_FLASH), + .HWDATA_FLASH(HWDATA_FLASH), + .HRDATA_FLASH(HRDATA_FLASH), + .HREADY_FLASH(HREADY_FLASH), + .HRESP_FLASH(HRESP_FLASH), + + .AWID_GIC(GIC_AWID), + .AWADDR_GIC(GIC_AWADDR), + .AWLEN_GIC(GIC_AWLEN), + .AWSIZE_GIC(GIC_AWSIZE), + .AWBURST_GIC(GIC_AWBURST), + .AWLOCK_GIC(GIC_AWLOCK), + .AWCACHE_GIC(GIC_AWCACHE), + .AWPROT_GIC(GIC_AWPROT), + .AWVALID_GIC(GIC_AWVALID), + .AWREADY_GIC(GIC_AWREADY), + .WDATA_GIC(GIC_WDATA), + .WSTRB_GIC(GIC_WSTRB), + .WLAST_GIC(GIC_WLAST), + .WVALID_GIC(GIC_WVALID), + .WREADY_GIC(GIC_WREADY), + .BID_GIC(GIC_BID), + .BRESP_GIC(GIC_BRESP), + .BVALID_GIC(GIC_BVALID), + .BREADY_GIC(GIC_BREADY), + .ARID_GIC(GIC_ARID), + .ARADDR_GIC(GIC_ARADDR), + .ARLEN_GIC(GIC_ARLEN), + .ARSIZE_GIC(GIC_ARSIZE), + .ARBURST_GIC(GIC_ARBURST), + .ARLOCK_GIC(GIC_ARLOCK), + .ARCACHE_GIC(GIC_ARCACHE), + .ARPROT_GIC(GIC_ARPROT), + .ARVALID_GIC(GIC_ARVALID), + 
.ARREADY_GIC(GIC_ARREADY), + .RID_GIC(GIC_RID), + .RDATA_GIC(GIC_RDATA), + .RRESP_GIC(GIC_RRESP), + .RLAST_GIC(GIC_RLAST), + .RVALID_GIC(GIC_RVALID), + .RREADY_GIC(GIC_RREADY), + + .PADDR_PERIPHERAL(PADDR_PERIPHERAL), + .PWDATA_PERIPHERAL(PWDATA_PERIPHERAL), + .PWRITE_PERIPHERAL(PWRITE_PERIPHERAL), + .PENABLE_PERIPHERAL(PENABLE_PERIPHERAL), + .PSELx_PERIPHERAL(PSELx_PERIPHERAL), + .PRDATA_PERIPHERAL(PRDATA_PERIPHERAL), + .PSLVERR_PERIPHERAL(PSLVERR_PERIPHERAL), + .PREADY_PERIPHERAL(PREADY_PERIPHERAL), + + + .AWID_RAM(AWID_RAM), + .AWADDR_RAM(AWADDR_RAM), + .AWLEN_RAM(AWLEN_RAM), + .AWSIZE_RAM(AWSIZE_RAM), + .AWBURST_RAM(AWBURST_RAM), + .AWLOCK_RAM(AWLOCK_RAM), + .AWCACHE_RAM(AWCACHE_RAM), + .AWPROT_RAM(AWPROT_RAM), + .AWVALID_RAM(AWVALID_RAM), + .AWREADY_RAM(AWREADY_RAM), + .WDATA_RAM(WDATA_RAM), + .WSTRB_RAM(WSTRB_RAM), + .WLAST_RAM(WLAST_RAM), + .WVALID_RAM(WVALID_RAM), + .WREADY_RAM(WREADY_RAM), + .BID_RAM(BID_RAM), + .BRESP_RAM(BRESP_RAM), + .BVALID_RAM(BVALID_RAM), + .BREADY_RAM(BREADY_RAM), + .ARID_RAM(ARID_RAM), + .ARADDR_RAM(ARADDR_RAM), + .ARLEN_RAM(ARLEN_RAM), + .ARSIZE_RAM(ARSIZE_RAM), + .ARBURST_RAM(ARBURST_RAM), + .ARLOCK_RAM(ARLOCK_RAM), + .ARCACHE_RAM(ARCACHE_RAM), + .ARPROT_RAM(ARPROT_RAM), + .ARVALID_RAM(ARVALID_RAM), + .ARREADY_RAM(ARREADY_RAM), + .RID_RAM(RID_RAM), + .RDATA_RAM(RDATA_RAM), + .RRESP_RAM(RRESP_RAM), + .RLAST_RAM(RLAST_RAM), + .RVALID_RAM(RVALID_RAM), + .RREADY_RAM(RREADY_RAM), + + .AWID_ROM(AWID_ROM), + .AWADDR_ROM(AWADDR_ROM), + .AWLEN_ROM(AWLEN_ROM), + .AWSIZE_ROM(AWSIZE_ROM), + .AWBURST_ROM(AWBURST_ROM), + .AWLOCK_ROM(AWLOCK_ROM), + .AWCACHE_ROM(AWCACHE_ROM), + .AWPROT_ROM(AWPROT_ROM), + .AWVALID_ROM(AWVALID_ROM), + .AWREADY_ROM(AWREADY_ROM), + .WDATA_ROM(WDATA_ROM), + .WSTRB_ROM(WSTRB_ROM), + .WLAST_ROM(WLAST_ROM), + .WVALID_ROM(WVALID_ROM), + .WREADY_ROM(WREADY_ROM), + .BID_ROM(BID_ROM), + .BRESP_ROM(BRESP_ROM), + .BVALID_ROM(BVALID_ROM), + .BREADY_ROM(BREADY_ROM), + .ARID_ROM(ARID_ROM), + .ARADDR_ROM(ARADDR_ROM), + .ARLEN_ROM(ARLEN_ROM), + .ARSIZE_ROM(ARSIZE_ROM), + .ARBURST_ROM(ARBURST_ROM), + .ARLOCK_ROM(ARLOCK_ROM), + .ARCACHE_ROM(ARCACHE_ROM), + .ARPROT_ROM(ARPROT_ROM), + .ARVALID_ROM(ARVALID_ROM), + .ARREADY_ROM(ARREADY_ROM), + .RID_ROM(RID_ROM), + .RDATA_ROM(RDATA_ROM), + .RRESP_ROM(RRESP_ROM), + .RLAST_ROM(RLAST_ROM), + .RVALID_ROM(RVALID_ROM), + .RREADY_ROM(RREADY_ROM), + + .PADDR_DEBUG(CPU_PADDRDBG), + .PWDATA_DEBUG(CPU_PWDATADBG), + .PWRITE_DEBUG(CPU_PWRITEDBG), + .PENABLE_DEBUG(CPU_PENABLEDBG), + .PSELx_DEBUG(CPU_PSELDBG), + .PRDATA_DEBUG(CPU_PRDATADBG), + .PSLVERR_DEBUG(CPU_PSLVERRDBG), + .PREADY_DEBUG(CPU_PREADYDBG), + + + .PADDR_FLASH_CTRL(PADDR_FLASH_CTRL), + .PWDATA_FLASH_CTRL(PWDATA_FLASH_CTRL), + .PWRITE_FLASH_CTRL(PWRITE_FLASH_CTRL), + .PPROT_FLASH_CTRL(PPROT_FLASH_CTRL), + .PSTRB_FLASH_CTRL(PSTRB_FLASH_CTRL), + .PENABLE_FLASH_CTRL(PENABLE_FLASH_CTRL), + .PSELx_FLASH_CTRL(PSELx_FLASH_CTRL), + .PRDATA_FLASH_CTRL(PRDATA_FLASH_CTRL), + .PSLVERR_FLASH_CTRL(PSLVERR_FLASH_CTRL), + .PREADY_FLASH_CTRL(PREADY_FLASH_CTRL), + + .AWID_A53(CPU_AWIDM), + .AWADDR_A53(CPU_AWADDRM), + .AWLEN_A53(CPU_AWLENM), + .AWSIZE_A53(CPU_AWSIZEM), + .AWBURST_A53(CPU_AWBURSTM), + .AWLOCK_A53(CPU_AWLOCKM), + .AWCACHE_A53(CPU_AWCACHEM), + .AWPROT_A53(CPU_AWPROTM), + .AWVALID_A53(CPU_AWVALIDM), + .AWREADY_A53(CPU_AWREADYM), + .WDATA_A53(CPU_WDATAM), + .WSTRB_A53(CPU_WSTRBM), + .WLAST_A53(CPU_WLASTM), + .WVALID_A53(CPU_WVALIDM), + .WREADY_A53(CPU_WREADYM), + .BID_A53(CPU_BIDM), + .BRESP_A53(CPU_BRESPM), + .BVALID_A53(CPU_BVALIDM), + .BREADY_A53(CPU_BREADYM), + 
.ARID_A53(CPU_ARIDM), + .ARADDR_A53(CPU_ARADDRM), + .ARLEN_A53(CPU_ARLENM), + .ARSIZE_A53(CPU_ARSIZEM), + .ARBURST_A53(CPU_ARBURSTM), + .ARLOCK_A53(CPU_ARLOCKM), + .ARCACHE_A53(CPU_ARCACHEM), + .ARPROT_A53(CPU_ARPROTM), + .ARVALID_A53(CPU_ARVALIDM), + .ARREADY_A53(CPU_ARREADYM), + .RID_A53(CPU_RIDM), + .RDATA_A53(CPU_RDATAM), + .RRESP_A53(CPU_RRESPM), + .RLAST_A53(CPU_RLASTM), + .RVALID_A53(CPU_RVALIDM), + .RREADY_A53(CPU_RREADYM), + + .clk0clk(SYS_CLK), + .clk0clken(SYS_CLKEN), + .clk0resetn(SYS_RESETn) + +); + +ROM_wrapper u_ROM_wrapper( + .ACLK(SYS_CLK), + .ARESETn(SYS_RESETn), + .AWVALID(AWVALID_ROM), + .AWREADY(AWREADY_ROM), + .AWID(AWID_ROM), + .AWADDR(AWADDR_ROM), + .AWLEN(AWLEN_ROM), + .AWSIZE(AWSIZE_ROM), + .AWBURST(AWBURST_ROM), + .AWLOCK(AWLOCK_ROM), + .AWPROT(AWPROT_ROM), + .AWQOS(4'h0), + .WVALID(WVALID_ROM), + .WREADY(WREADY_ROM), + .WDATA(WDATA_ROM), + .WSTRB(WSTRB_ROM), + .WLAST(WLAST_ROM), + .WPOISON(1'b0), + .BVALID(BVALID_ROM), + .BREADY(BREADY_ROM), + .BID(BID_ROM), + .BRESP(BRESP_ROM), + .ARVALID(ARVALID_ROM), + .ARREADY(ARREADY_ROM), + .ARID(ARID_ROM), + .ARADDR(ARADDR_ROM), + .ARLEN(ARLEN_ROM), + .ARSIZE(ARSIZE_ROM), + .ARBURST(ARBURST_ROM), + .ARLOCK(ARLOCK_ROM), + .ARPROT(ARPROT_ROM), + .ARQOS(4'h0), + .RVALID(RVALID_ROM), + .RREADY(RREADY_ROM), + .RID(RID_ROM), + .RDATA(RDATA_ROM), + .RRESP(RRESP_ROM), + .RLAST(RLAST_ROM), + .RPOISON(), + .AWAKEUP(1'b1), + .clk_qreqn(SYS_CLKEN), + .clk_qacceptn(), + .clk_qdeny(), + .clk_qactive(), + .pwr_qreqn(SYS_CLKEN), + .pwr_qacceptn(), + .pwr_qdeny(), + .pwr_qactive(), + .ext_gt_qreqn(1'b1), + .ext_gt_qacceptn(), + .cfg_gate_resp(1'b0) +); + +// top_ahb_qspi #(.DATA_W(32)) u_sl_ahb_qspi( +// .HCLK(SYS_CLK), +// .HRESETn(SYS_RESETn), +// .PCLK(SYS_CLK), +// .PRESETn(SYS_RESETn), +// .HADDR(HADDR_FLASH), +// .HTRANS(HTRANS_FLASH), +// .HWRITE(HWRITE_FLASH), +// .HSIZE(HSIZE_FLASH), +// .HBURST(HBURST_FLASH), +// .HPROT(HPROT_FLASH), +// .HWDATA(HWDATA_FLASH), +// .HSELx(1'b1), +// .HRDATA(HRDATA_FLASH), +// .HREADY(1'b1), +// .HREADYOUT(HREADY_FLASH), +// .HRESP(HRESP_FLASH), +// .PADDR(PADDR_FLASH_CTRL), +// .PPROT(PPROT_FLASH_CTRL), +// .PSEL(PSELx_FLASH_CTRL), +// .PENABLE(PENABLE_FLASH_CTRL), +// .PWRITE(PWRITE_FLASH_CTRL), +// .PWDATA(PWDATA_FLASH_CTRL), +// .PSTRB(PSTRB_FLASH_CTRL), +// .PRDATA(PRDATA_FLASH_CTRL), +// .PREADY(PREADY_FLASH_CTRL), +// .PSLVERR(PSLVERR_FLASH_CTRL), +// .QSPI_SCLK(QSPI_SCLK), +// .QSPI_nCS(QSPI_nCS), +// .QSPI_IO_o(QSPI_IO_o), +// .QSPI_IO_i(QSPI_IO_i), +// .QSPI_IO_e(QSPI_IO_e) +// ); + +sl_ahb_sram u_sl_ahb_sram( + .HCLK(SYS_CLK), + .HRESETn(SYS_RESETn), + .HSEL(1'b1), + .HREADY(1'b1), + .HTRANS(HTRANS_FLASH), + .HSIZE(HSIZE_FLASH), + .HWRITE(HWRITE_FLASH), + .HADDR(HADDR_FLASH), + .HWDATA(HWDATA_FLASH), + .HREADYOUT(HREADY_FLASH), + .HRESP(HRESP_FLASH), + .HRDATA(HRDATA_FLASH) +); + +SRAM_wrapper u_SRAM_wrapper( + .ACLK(SYS_CLK), + .ARESETn(SYS_RESETn), + .AWVALID(AWVALID_RAM), + .AWREADY(AWREADY_RAM), + .AWID(AWID_RAM), + .AWADDR(AWADDR_RAM), + .AWLEN(AWLEN_RAM), + .AWSIZE(AWSIZE_RAM), + .AWBURST(AWBURST_RAM), + .AWLOCK(AWLOCK_RAM), + .AWPROT(AWPROT_RAM), + .AWQOS(4'h0), + .WVALID(WVALID_RAM), + .WREADY(WREADY_RAM), + .WDATA(WDATA_RAM), + .WSTRB(WSTRB_RAM), + .WLAST(WLAST_RAM), + .WPOISON(1'b0), + .BVALID(BVALID_RAM), + .BREADY(BREADY_RAM), + .BID(BID_RAM), + .BRESP(BRESP_RAM), + .ARVALID(ARVALID_RAM), + .ARREADY(ARREADY_RAM), + .ARID(ARID_RAM), + .ARADDR(ARADDR_RAM), + .ARLEN(ARLEN_RAM), + .ARSIZE(ARSIZE_RAM), + .ARBURST(ARBURST_RAM), + .ARLOCK(ARLOCK_RAM), + .ARPROT(ARPROT_RAM), + 
.ARQOS(4'h0), + .RVALID(RVALID_RAM), + .RREADY(RREADY_RAM), + .RID(RID_RAM), + .RDATA(RDATA_RAM), + .RRESP(RRESP_RAM), + .RLAST(RLAST_RAM), + .RPOISON(), + .AWAKEUP(1'b1), + .clk_qreqn(SYS_CLKEN), + .clk_qacceptn(), + .clk_qdeny(), + .clk_qactive(), + .pwr_qreqn(SYS_CLKEN), + .pwr_qacceptn(), + .pwr_qdeny(), + .pwr_qactive(), + .ext_gt_qreqn(1'b1), + .ext_gt_qacceptn(), + .cfg_gate_resp(1'b0) +); +megasoc_peripheral_subsystem u_megasoc_peripheral_subsystem( + .PCLK(SYS_CLK), + .PRESETn(SYS_RESETn), + .PADDR(PADDR_PERIPHERAL), + .PENABLE(PENABLE_PERIPHERAL), + .PWRITE(PWRITE_PERIPHERAL), + .PWDATA(PWDATA_PERIPHERAL), + .PSEL(PSELx_PERIPHERAL), + .PRDATA(PRDATA_PERIPHERAL), + .PREADY(PREADY_PERIPHERAL), + .PSLVERR(PSLVERR_PERIPHERAL), + .UARTRXD(UARTRXD), + .UARTTXD(UARTTXD), + .UARTTXEN(UARTTXEN), + .PERI_IRQS(PERI_IRQS) ); endmodule \ No newline at end of file diff --git a/makefile b/makefile index cdf4b487b09e5a392ad5681eac9adac603035032..ee5fde858cb12dcc6ae2210e8fb8ff5f1a77fb16 100644 --- a/makefile +++ b/makefile @@ -12,15 +12,13 @@ include ./make.cfg -build_m55: - $(Cortex_M55_IP_DIR)/yamin/logical/generate -config ./socrates/Cortex-M55/yamin.yaml -output ./logical/Cortex-M55 -daplite2 $(Cortex_M55_IP_DIR)/daplite2 -tpium $(Cortex_M55_IP_DIR)/tpium -verbose -pmc100 none -soc600m $(SOC600_IP_DIR)/ build_pck600: socrates_cli --project megasoc_tech -data ../ --flow build.configured.component configuredComponentName=pck600_clk_ctrl_1 socrates_cli --project megasoc_tech -data ../ --flow build.configured.component configuredComponentName=pck600_ppu_1 build_sie300_sram_ctrl: @$(SIE300_IP_LOGICAL_DIR)/generate --config ./socrates/BP301_SRAM/config/SRAM_ctrl.yaml --output ./logical/SMC build_nic400: - socrates_cli --project megasoc_tech -data ../ --flow build.configured.component configuredComponentName=nic400_millisoc_system + socrates_cli --project megasoc_tech -data ../ --flow build.configured.component configuredComponentName=nic400_megasoc_main build_ip: make_project: diff --git a/socrates/BP301_SRAM/config/SRAM_ctrl.yaml b/socrates/BP301_SRAM/config/SRAM_ctrl.yaml index 22a2341307b3d3e4d2bef2bad3a7b48e7e46e15d..e4838bb620df6846b5bc273920e4cd28be0b0d0c 100644 --- a/socrates/BP301_SRAM/config/SRAM_ctrl.yaml +++ b/socrates/BP301_SRAM/config/SRAM_ctrl.yaml @@ -63,7 +63,7 @@ DATA_WIDTH: 64 # ID_WIDTH: AXI5 ID width for all channels # Valid values: # 2-32 -ID_WIDTH: 4 +ID_WIDTH: 5 # diff --git a/socrates/CortexA53_1/CortexA53_1.xml b/socrates/CortexA53_1/CortexA53_1.xml index 4044349d46a62e1444d44d2ce7be86ce608b4b19..df65dfd421f823c5f62c25654195b21fc74c980c 100644 --- a/socrates/CortexA53_1/CortexA53_1.xml +++ b/socrates/CortexA53_1/CortexA53_1.xml @@ -17,11 +17,11 @@ </Parameter> <Parameter> <Name>ACP</Name> - <Value>TRUE</Value> + <Value>FALSE</Value> </Parameter> <Parameter> <Name>ACE</Name> - <Value>FALSE</Value> + <Value>TRUE</Value> </Parameter> <Parameter> <Name>L1_ICACHE_SIZE</Name> @@ -49,7 +49,7 @@ </Parameter> <Parameter> <Name>L2_CACHE_SIZE</Name> - <Value>512KB</Value> + <Value>256KB</Value> </Parameter> <Parameter> <Name>L2_INPUT_LATENCY</Name> @@ -57,7 +57,7 @@ </Parameter> <Parameter> <Name>NUM_CPUS</Name> - <Value>2</Value> + <Value>1</Value> </Parameter> <Parameter> <Name>CRYPTO</Name> @@ -100,14 +100,6 @@ <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>interrupt_master_VCPU_MNT_IRQ1</Name> - <UID>IF-interrupt_master_VCPU_MNT_IRQ1</UID> - <Requester/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> 
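<!-- Annotation, summarising this Socrates reconfiguration: the CortexA53_1
     component goes from two CPUs to one, the ACP and CHI-RNF interfaces are
     dropped in favour of a 128-bit ACE master with 5-bit read/write IDs, and
     the L2 cache is reduced from 512KB to 256KB, matching the single-core
     megasoc_cpu_ss instantiation above. -->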
<Interface> <Name>TimerEventInterface_master_CPU0</Name> <UID>IF-TimerEventInterface_master_CPU0</UID> @@ -116,14 +108,6 @@ <ProtocolRef>TimerEventInterface</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>TimerEventInterface_master_CPU1</Name> - <UID>IF-TimerEventInterface_master_CPU1</UID> - <Requester/> - <Protocol> - <ProtocolRef>TimerEventInterface</ProtocolRef> - </Protocol> - </Interface> <Interface> <Name>ATB_master_CPU0</Name> <UID>IF-ATB_master_CPU0</UID> @@ -143,15 +127,6 @@ </Protocol> <ClockDomainRef>CD-CLKIN</ClockDomainRef> </Interface> - <Interface> - <Name>CHI-RNF_master</Name> - <UID>IF-CHI-RNF_master</UID> - <Requester/> - <Protocol> - <ProtocolRef>CHI-RNF-r0p0_0</ProtocolRef> - </Protocol> - <ClockDomainRef>CD-CLKIN</ClockDomainRef> - </Interface> <Interface> <Name>AXI4Stream_master_PROCESSOR</Name> <UID>IF-AXI4Stream_master_PROCESSOR</UID> @@ -198,28 +173,47 @@ <ClockDomainRef>CD-CLKIN</ClockDomainRef> </Interface> <Interface> - <Name>Channel_master_CTI_IRQ1</Name> - <UID>IF-Channel_master_CTI_IRQ1</UID> + <Name>ACE_master</Name> + <UID>IF-ACE_master</UID> <Requester/> <Protocol> - <ProtocolRef>Channel</ProtocolRef> - </Protocol> - <ClockDomainRef>CD-CLKIN</ClockDomainRef> - </Interface> - <Interface> - <Name>ATB_master_CPU1</Name> - <UID>IF-ATB_master_CPU1</UID> - <Requester/> - <Protocol> - <ProtocolRef>ATB4</ProtocolRef> + <ProtocolRef>ACE</ProtocolRef> <Parameters> <Parameter> - <Name>ATBDataWidth</Name> - <Value>32</Value> + <Name>ADDR_WIDTH</Name> + <Value>44</Value> </Parameter> <Parameter> - <Name>ATBytesWidth</Name> - <Value>2</Value> + <Name>DATA_WIDTH</Name> + <Value>128</Value> + </Parameter> + <Parameter> + <Name>ID_R_WIDTH</Name> + <Value>5</Value> + </Parameter> + <Parameter> + <Name>ID_W_WIDTH</Name> + <Value>5</Value> + </Parameter> + <Parameter> + <Name>AWUSER_WIDTH</Name> + <Value>0</Value> + </Parameter> + <Parameter> + <Name>ARUSER_WIDTH</Name> + <Value>0</Value> + </Parameter> + <Parameter> + <Name>WUSER_WIDTH</Name> + <Value>0</Value> + </Parameter> + <Parameter> + <Name>RUSER_WIDTH</Name> + <Value>0</Value> + </Parameter> + <Parameter> + <Name>BUSER_WIDTH</Name> + <Value>0</Value> </Parameter> </Parameters> </Protocol> @@ -233,14 +227,6 @@ <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>interrupt_master_COMMIRQ1</Name> - <UID>IF-interrupt_master_COMMIRQ1</UID> - <Requester/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> <Interface> <Name>interrupt_master_PMU_IRQ0</Name> <UID>IF-interrupt_master_PMU_IRQ0</UID> @@ -249,14 +235,6 @@ <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>interrupt_master_PMU_IRQ1</Name> - <UID>IF-interrupt_master_PMU_IRQ1</UID> - <Requester/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> <Interface> <Name>interrupt_master_EXTERRIRQ</Name> <UID>IF-interrupt_master_EXTERRIRQ</UID> @@ -265,20 +243,6 @@ <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP13</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP13</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> <Interface> <Name>interrupt_slave_FIQ0</Name> <UID>IF-interrupt_slave_FIQ0</UID> @@ -287,14 +251,6 @@ <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> - <Interface> - 
<Name>interrupt_slave_FIQ1</Name> - <UID>IF-interrupt_slave_FIQ1</UID> - <Completer/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> <Interface> <Name>AXI4Stream_slave_DISTRIBUTOR</Name> <UID>IF-AXI4Stream_slave_DISTRIBUTOR</UID> @@ -322,20 +278,6 @@ </Protocol> <ClockDomainRef>CD-CLKIN</ClockDomainRef> </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF1_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF1_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> <Interface> <Name>RESET_slave_CPU_PORESET0</Name> <UID>IF-RESET_slave_CPU_PORESET0</UID> @@ -344,14 +286,6 @@ <ProtocolRef>RESET</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>RESET_slave_CPU_PORESET1</Name> - <UID>IF-RESET_slave_CPU_PORESET1</UID> - <Completer/> - <Protocol> - <ProtocolRef>RESET</ProtocolRef> - </Protocol> - </Interface> <Interface> <Name>APB_slave_DEBUG</Name> <UID>IF-APB_slave_DEBUG</UID> @@ -393,20 +327,6 @@ </Parameters> </Protocol> </Interface> - <Interface> - <Name>Staticcfg_slave_CFGEND1</Name> - <UID>IF-Staticcfg_slave_CFGEND1</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> <Interface> <Name>Q-Channel_slave_NEON0</Name> <UID>IF-Q-Channel_slave_NEON0</UID> @@ -415,14 +335,6 @@ <ProtocolRef>Q-Channel-generic</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>Q-Channel_slave_NEON1</Name> - <UID>IF-Q-Channel_slave_NEON1</UID> - <Completer/> - <Protocol> - <ProtocolRef>Q-Channel-generic</ProtocolRef> - </Protocol> - </Interface> <Interface> <Name>Staticcfg_slave_CFGTE0</Name> <UID>IF-Staticcfg_slave_CFGTE0</UID> @@ -437,20 +349,6 @@ </Parameters> </Protocol> </Interface> - <Interface> - <Name>Staticcfg_slave_CFGTE1</Name> - <UID>IF-Staticcfg_slave_CFGTE1</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> <Interface> <Name>interrupt_slave_VIRQ0</Name> <UID>IF-interrupt_slave_VIRQ0</UID> @@ -460,16 +358,16 @@ </Protocol> </Interface> <Interface> - <Name>interrupt_slave_VIRQ1</Name> - <UID>IF-interrupt_slave_VIRQ1</UID> + <Name>interrupt_slave_VFIQ0</Name> + <UID>IF-interrupt_slave_VFIQ0</UID> <Completer/> <Protocol> <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP4</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP4</UID> + <Name>Staticcfg_slave_VINITHI0</Name> + <UID>IF-Staticcfg_slave_VINITHI0</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -482,8 +380,16 @@ </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP10</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP10</UID> + <Name>RESET_slave_MBIST</Name> + <UID>IF-RESET_slave_MBIST</UID> + <Completer/> + <Protocol> + <ProtocolRef>RESET</ProtocolRef> + </Protocol> + </Interface> + <Interface> + <Name>Staticcfg_slave_CLUSTERIDAFF1</Name> + <UID>IF-Staticcfg_slave_CLUSTERIDAFF1</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -496,24 +402,16 @@ </Protocol> </Interface> <Interface> - <Name>interrupt_slave_VFIQ0</Name> - <UID>IF-interrupt_slave_VFIQ0</UID> - <Completer/> - <Protocol> - 
<ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>interrupt_slave_VFIQ1</Name> - <UID>IF-interrupt_slave_VFIQ1</UID> + <Name>Authentication_slave_CPU0</Name> + <UID>IF-Authentication_slave_CPU0</UID> <Completer/> <Protocol> - <ProtocolRef>interrupt</ProtocolRef> + <ProtocolRef>Authentication</ProtocolRef> </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP9</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP9</UID> + <Name>Staticcfg_slave_CP15SDISABLE0</Name> + <UID>IF-Staticcfg_slave_CP15SDISABLE0</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -526,16 +424,17 @@ </Protocol> </Interface> <Interface> - <Name>ACP_slave</Name> - <UID>IF-ACP_slave</UID> + <Name>Channel_slave_CTI_CHIN</Name> + <UID>IF-Channel_slave_CTI_CHIN</UID> <Completer/> <Protocol> - <ProtocolRef>ACP4</ProtocolRef> + <ProtocolRef>Channel</ProtocolRef> </Protocol> + <ClockDomainRef>CD-CLKIN</ClockDomainRef> </Interface> <Interface> - <Name>Staticcfg_slave_SAMMN_BASE</Name> - <UID>IF-Staticcfg_slave_SAMMN_BASE</UID> + <Name>Staticcfg_slave_SYSBARDISABLE</Name> + <UID>IF-Staticcfg_slave_SYSBARDISABLE</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -548,8 +447,8 @@ </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP5</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP5</UID> + <Name>Staticcfg_slave_GICC_DISABLE</Name> + <UID>IF-Staticcfg_slave_GICC_DISABLE</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -562,8 +461,8 @@ </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMMN_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMMN_NODEID</UID> + <Name>Staticcfg_slave_AA64nAA32_CPU0</Name> + <UID>IF-Staticcfg_slave_AA64nAA32_CPU0</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -576,8 +475,8 @@ </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP0</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP0</UID> + <Name>Staticcfg_slave_BROADCASTINNER</Name> + <UID>IF-Staticcfg_slave_BROADCASTINNER</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -590,36 +489,24 @@ </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_VINITHI0</Name> - <UID>IF-Staticcfg_slave_VINITHI0</UID> + <Name>RESET_slave_CORE_RESET0</Name> + <UID>IF-RESET_slave_CORE_RESET0</UID> <Completer/> <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> + <ProtocolRef>RESET</ProtocolRef> </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_VINITHI1</Name> - <UID>IF-Staticcfg_slave_VINITHI1</UID> + <Name>interrupt_slave_SEI0</Name> + <UID>IF-interrupt_slave_SEI0</UID> <Completer/> <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> + <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP12</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP12</UID> + <Name>Staticcfg_slave_CLUSTERIDAFF2</Name> + <UID>IF-Staticcfg_slave_CLUSTERIDAFF2</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -632,16 +519,8 @@ </Protocol> </Interface> <Interface> - <Name>RESET_slave_MBIST</Name> - <UID>IF-RESET_slave_MBIST</UID> - <Completer/> - <Protocol> - <ProtocolRef>RESET</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_CLUSTERIDAFF1</Name> - <UID>IF-Staticcfg_slave_CLUSTERIDAFF1</UID> + 
<Name>Staticcfg_slave_BROADCASTCACHEMAINT</Name> + <UID>IF-Staticcfg_slave_BROADCASTCACHEMAINT</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -654,38 +533,41 @@ </Protocol> </Interface> <Interface> - <Name>Authentication_slave_CPU0</Name> - <UID>IF-Authentication_slave_CPU0</UID> + <Name>interrupt_slave_IRQ0</Name> + <UID>IF-interrupt_slave_IRQ0</UID> <Completer/> <Protocol> - <ProtocolRef>Authentication</ProtocolRef> + <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> <Interface> - <Name>Authentication_slave_CPU1</Name> - <UID>IF-Authentication_slave_CPU1</UID> + <Name>RESET_slave_L2</Name> + <UID>IF-RESET_slave_L2</UID> <Completer/> <Protocol> - <ProtocolRef>Authentication</ProtocolRef> + <ProtocolRef>RESET</ProtocolRef> </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_CP15SDISABLE0</Name> - <UID>IF-Staticcfg_slave_CP15SDISABLE0</UID> + <Name>interrupt_slave_VSEI0</Name> + <UID>IF-interrupt_slave_VSEI0</UID> <Completer/> <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> + <ProtocolRef>interrupt</ProtocolRef> + </Protocol> + </Interface> + <Interface> + <Name>EVENT_slave_EDBGRQ0</Name> + <UID>IF-EVENT_slave_EDBGRQ0</UID> + <Completer/> + <Protocol> + <ProtocolRef>EVENT</ProtocolRef> </Protocol> + <ClockDomainRef>CD-CLKIN</ClockDomainRef> </Interface> <Interface> - <Name>Staticcfg_slave_CP15SDISABLE1</Name> - <UID>IF-Staticcfg_slave_CP15SDISABLE1</UID> + <Name>Staticcfg_slave_DBGROMADDR</Name> + <UID>IF-Staticcfg_slave_DBGROMADDR</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -698,17 +580,16 @@ </Protocol> </Interface> <Interface> - <Name>Channel_slave_CTI_CHIN</Name> - <UID>IF-Channel_slave_CTI_CHIN</UID> + <Name>Q-Channel_slave_CPU0</Name> + <UID>IF-Q-Channel_slave_CPU0</UID> <Completer/> <Protocol> - <ProtocolRef>Channel</ProtocolRef> + <ProtocolRef>Q-Channel-generic</ProtocolRef> </Protocol> - <ClockDomainRef>CD-CLKIN</ClockDomainRef> </Interface> <Interface> - <Name>Staticcfg_slave_NODEID</Name> - <UID>IF-Staticcfg_slave_NODEID</UID> + <Name>Staticcfg_slave_BROADCASTOUTER</Name> + <UID>IF-Staticcfg_slave_BROADCASTOUTER</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -721,8 +602,8 @@ </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP8</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP8</UID> + <Name>Staticcfg_slave_RVB_ARADDR0</Name> + <UID>IF-Staticcfg_slave_RVB_ARADDR0</UID> <Completer/> <Protocol> <ProtocolRef>Staticcfg</ProtocolRef> @@ -735,520 +616,8 @@ </Protocol> </Interface> <Interface> - <Name>Staticcfg_slave_SAMADDRMAP2</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP2</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF0_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF0_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_RVB_ARADDR1</Name> - <UID>IF-Staticcfg_slave_RVB_ARADDR1</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - 
</Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF5_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF5_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SYSBARDISABLE</Name> - <UID>IF-Staticcfg_slave_SYSBARDISABLE</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_GICC_DISABLE</Name> - <UID>IF-Staticcfg_slave_GICC_DISABLE</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP1</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP1</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF2_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF2_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_AA64nAA32_CPU0</Name> - <UID>IF-Staticcfg_slave_AA64nAA32_CPU0</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_AA64nAA32_CPU1</Name> - <UID>IF-Staticcfg_slave_AA64nAA32_CPU1</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_BROADCASTINNER</Name> - <UID>IF-Staticcfg_slave_BROADCASTINNER</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>RESET_slave_CORE_RESET0</Name> - <UID>IF-RESET_slave_CORE_RESET0</UID> - <Completer/> - <Protocol> - <ProtocolRef>RESET</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>RESET_slave_CORE_RESET1</Name> - <UID>IF-RESET_slave_CORE_RESET1</UID> - <Completer/> - <Protocol> - <ProtocolRef>RESET</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>interrupt_slave_SEI0</Name> - <UID>IF-interrupt_slave_SEI0</UID> - <Completer/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>interrupt_slave_SEI1</Name> - <UID>IF-interrupt_slave_SEI1</UID> - <Completer/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_CLUSTERIDAFF2</Name> - <UID>IF-Staticcfg_slave_CLUSTERIDAFF2</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - 
<Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNI0_NODID</Name> - <UID>IF-Staticcfg_slave_SAMHNI0_NODID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_BROADCASTCACHEMAINT</Name> - <UID>IF-Staticcfg_slave_BROADCASTCACHEMAINT</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF4_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF4_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP6</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP6</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SCLKEN</Name> - <UID>IF-Staticcfg_slave_SCLKEN</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP3</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP3</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF6_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF6_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>interrupt_slave_IRQ0</Name> - <UID>IF-interrupt_slave_IRQ0</UID> - <Completer/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>interrupt_slave_IRQ1</Name> - <UID>IF-interrupt_slave_IRQ1</UID> - <Completer/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP15</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP15</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF_MODE</Name> - <UID>IF-Staticcfg_slave_SAMHNF_MODE</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>RESET_slave_L2</Name> - <UID>IF-RESET_slave_L2</UID> - <Completer/> - <Protocol> - <ProtocolRef>RESET</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>interrupt_slave_VSEI0</Name> - <UID>IF-interrupt_slave_VSEI0</UID> - <Completer/> - <Protocol> - 
<ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>interrupt_slave_VSEI1</Name> - <UID>IF-interrupt_slave_VSEI1</UID> - <Completer/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF7_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF7_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP7</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP7</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNI1_NODID</Name> - <UID>IF-Staticcfg_slave_SAMHNI1_NODID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>EVENT_slave_EDBGRQ0</Name> - <UID>IF-EVENT_slave_EDBGRQ0</UID> - <Completer/> - <Protocol> - <ProtocolRef>EVENT</ProtocolRef> - </Protocol> - <ClockDomainRef>CD-CLKIN</ClockDomainRef> - </Interface> - <Interface> - <Name>EVENT_slave_EDBGRQ1</Name> - <UID>IF-EVENT_slave_EDBGRQ1</UID> - <Completer/> - <Protocol> - <ProtocolRef>EVENT</ProtocolRef> - </Protocol> - <ClockDomainRef>CD-CLKIN</ClockDomainRef> - </Interface> - <Interface> - <Name>Staticcfg_slave_DBGROMADDR</Name> - <UID>IF-Staticcfg_slave_DBGROMADDR</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Q-Channel_slave_CPU0</Name> - <UID>IF-Q-Channel_slave_CPU0</UID> - <Completer/> - <Protocol> - <ProtocolRef>Q-Channel-generic</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>Q-Channel_slave_CPU1</Name> - <UID>IF-Q-Channel_slave_CPU1</UID> - <Completer/> - <Protocol> - <ProtocolRef>Q-Channel-generic</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SINACT</Name> - <UID>IF-Staticcfg_slave_SINACT</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_BROADCASTOUTER</Name> - <UID>IF-Staticcfg_slave_BROADCASTOUTER</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_RVB_ARADDR0</Name> - <UID>IF-Staticcfg_slave_RVB_ARADDR0</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP14</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP14</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> 
- </Interface> - <Interface> - <Name>WTimestamp_slave</Name> - <UID>IF-WTimestamp_slave</UID> + <Name>WTimestamp_slave</Name> + <UID>IF-WTimestamp_slave</UID> <Completer/> <Protocol> <ProtocolRef>WTimestamp</ProtocolRef> @@ -1275,20 +644,6 @@ </Parameters> </Protocol> </Interface> - <Interface> - <Name>Staticcfg_slave_SAMADDRMAP11</Name> - <UID>IF-Staticcfg_slave_SAMADDRMAP11</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> <Interface> <Name>Staticcfg_slave_PERIPHBASE</Name> <UID>IF-Staticcfg_slave_PERIPHBASE</UID> @@ -1311,28 +666,6 @@ <ProtocolRef>interrupt</ProtocolRef> </Protocol> </Interface> - <Interface> - <Name>interrupt_slave_REI1</Name> - <UID>IF-interrupt_slave_REI1</UID> - <Completer/> - <Protocol> - <ProtocolRef>interrupt</ProtocolRef> - </Protocol> - </Interface> - <Interface> - <Name>Staticcfg_slave_SAMHNF3_NODEID</Name> - <UID>IF-Staticcfg_slave_SAMHNF3_NODEID</UID> - <Completer/> - <Protocol> - <ProtocolRef>Staticcfg</ProtocolRef> - <Parameters> - <Parameter> - <Name>CONFIGURATION_WIDTH</Name> - <Value>1</Value> - </Parameter> - </Parameters> - </Protocol> - </Interface> </Interfaces> </Specification> </ConfiguredComponent> diff --git a/socrates/nic400_megasoc_main/nic400_megasoc_main.xml b/socrates/nic400_megasoc_main/nic400_megasoc_main.xml new file mode 100644 index 0000000000000000000000000000000000000000..f263ceaef8f5d66d4ba69007364cb8cae6907e93 --- /dev/null +++ b/socrates/nic400_megasoc_main/nic400_megasoc_main.xml @@ -0,0 +1,2003 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ConfiguredComponent> + <Name>nic400_megasoc_main</Name> + <Suffix>megasoc_main</Suffix> + <ConfigurableComponentRef> + <Vendor>arm.com</Vendor> + <Library>CoreLink</Library> + <Name>nic400</Name> + <Version>r1p2</Version> + </ConfigurableComponentRef> + <Specification> + <Parameters> + <AWUSERWidth>0</AWUSERWidth> + <ARUSERWidth>0</ARUSERWidth> + <WUSERWidth>0</WUSERWidth> + <BUSERWidth>0</BUSERWidth> + <RUSERWidth>0</RUSERWidth> + <GlobalIDWidth>6</GlobalIDWidth> + <HierarchicalClockGating>false</HierarchicalClockGating> + <ClockControllerImplementation>asynchronous</ClockControllerImplementation> + <RSBCentralRing>false</RSBCentralRing> + <DefaultProtocol>axi4</DefaultProtocol> + <UppercaseRTLSignals>true</UppercaseRTLSignals> + <Taxonomy>master_slave</Taxonomy> + <QoSEnabled>false</QoSEnabled> + <QVNEnabled>false</QVNEnabled> + <DPEEnabled>false</DPEEnabled> + <SystemDataWidth>128</SystemDataWidth> + </Parameters> + <Domains> + <VoltageDomains> + <VoltageDomain> + <Name>vd0</Name> + </VoltageDomain> + </VoltageDomains> + <PowerDomains> + <PowerDomain> + <Name>pd0</Name> + <PowerDomainType>AlwaysOn</PowerDomainType> + <VoltageDomainRef>vd0</VoltageDomainRef> + </PowerDomain> + </PowerDomains> + <ClockDomains> + <ClockDomain> + <Name>clk0</Name> + <ClockDomainType>physical</ClockDomainType> + <PowerDomainRef>pd0</PowerDomainRef> + </ClockDomain> + </ClockDomains> + <ClockRelations/> + </Domains> + <Groups> + <ExternalGroups/> + <APBGroups> + <APBGroup> + <Name>apb_group0</Name> + <ClockRef>clk0</ClockRef> + <ReadIssuingAPB>1</ReadIssuingAPB> + <WriteIssuingAPB>1</WriteIssuingAPB> + <TotalIssuingAPB>1</TotalIssuingAPB> + <LockSupport>false</LockSupport> + </APBGroup> + </APBGroups> + </Groups> + <Interfaces> + <SlaveInterface> + <Name>A53</Name> + <AXI4SlaveProtocol> + <AddressWidth>44</AddressWidth> + <DataWidth>128</DataWidth> + 
<RUSEREnabled>false</RUSEREnabled> + <WUSEREnabled>false</WUSEREnabled> + <VIDWidth>6</VIDWidth> + <MultiRegion>false</MultiRegion> + <TrustZoneSlave>secure</TrustZoneSlave> + <ReadAcceptance>4</ReadAcceptance> + <WriteAcceptance>4</WriteAcceptance> + <QoSTypeAXI>fixed</QoSTypeAXI> + <QoSValue>0</QoSValue> + <TransactionRateRegulation>false</TransactionRateRegulation> + <OutstandingTransactionRegulation>false</OutstandingTransactionRegulation> + <LatencyPeriodRegulation>false</LatencyPeriodRegulation> + <VNExternal>false</VNExternal> + </AXI4SlaveProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + <MultiPorted>false</MultiPorted> + <CyclicDependencyAvoidanceScheme>slave_per_id</CyclicDependencyAvoidanceScheme> + <LowLatency>false</LowLatency> + </SlaveInterface> + <MasterInterface> + <Name>ROM</Name> + <AXI4MasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>64</DataWidth> + <IDWidth>0</IDWidth> + <MultiRegion>false</MultiRegion> + <RUSEREnabled>false</RUSEREnabled> + <WUSEREnabled>false</WUSEREnabled> + <TrustZoneMaster>non_secure</TrustZoneMaster> + <ReadIssuing>1</ReadIssuing> + <WriteIssuing>1</WriteIssuing> + <TotalIssuing>1</TotalIssuing> + <MultiPorted>false</MultiPorted> + <IDWidthReduction>true</IDWidthReduction> + <OutputSignals>false</OutputSignals> + <VNExternal>false</VNExternal> + </AXI4MasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + <MasterInterface> + <Name>FLASH</Name> + <AHBLiteInitiatorMasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>32</DataWidth> + <RUSEREnabled>false</RUSEREnabled> + <WUSEREnabled>false</WUSEREnabled> + <ReadIssuing>1</ReadIssuing> + <WriteIssuing>1</WriteIssuing> + <TotalIssuing>1</TotalIssuing> + <TrustZoneMaster>non_secure</TrustZoneMaster> + <MultiPorted>false</MultiPorted> + <LockSupport>false</LockSupport> + </AHBLiteInitiatorMasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + <MasterInterface> + <Name>RAM</Name> + <AXI4MasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>64</DataWidth> + <IDWidth>0</IDWidth> + <MultiRegion>false</MultiRegion> + <RUSEREnabled>false</RUSEREnabled> + <WUSEREnabled>false</WUSEREnabled> + <TrustZoneMaster>non_secure</TrustZoneMaster> + <ReadIssuing>1</ReadIssuing> + <WriteIssuing>1</WriteIssuing> + <TotalIssuing>1</TotalIssuing> + <MultiPorted>false</MultiPorted> + <IDWidthReduction>true</IDWidthReduction> + <OutputSignals>false</OutputSignals> + <VNExternal>false</VNExternal> + </AXI4MasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + <MasterInterface> + <Name>PERIPHERAL</Name> + <APBMasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>32</DataWidth> + <TrustZoneMasterAPB>non_secure</TrustZoneMasterAPB> + <APBGroupRef>apb_group0</APBGroupRef> + </APBMasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + <MasterInterface> + <Name>DRAM</Name> + <AXI4MasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>64</DataWidth> + <IDWidth>0</IDWidth> + <MultiRegion>false</MultiRegion> + <RUSEREnabled>false</RUSEREnabled> + <WUSEREnabled>false</WUSEREnabled> + <TrustZoneMaster>non_secure</TrustZoneMaster> + <ReadIssuing>1</ReadIssuing> + <WriteIssuing>1</WriteIssuing> + <TotalIssuing>1</TotalIssuing> + <MultiPorted>false</MultiPorted> + <IDWidthReduction>true</IDWidthReduction> + 
<OutputSignals>false</OutputSignals> + <VNExternal>false</VNExternal> + </AXI4MasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + <MasterInterface> + <Name>FLASH_CTRL</Name> + <APB4MasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>32</DataWidth> + <TrustZoneMasterAPB>non_secure</TrustZoneMasterAPB> + <APBGroupRef>apb_group0</APBGroupRef> + </APB4MasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + <MasterInterface> + <Name>DEBUG</Name> + <APBMasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>32</DataWidth> + <TrustZoneMasterAPB>non_secure</TrustZoneMasterAPB> + <APBGroupRef>apb_group0</APBGroupRef> + </APBMasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + <MasterInterface> + <Name>GIC</Name> + <AXI4MasterProtocol> + <AddressWidth>32</AddressWidth> + <DataWidth>32</DataWidth> + <IDWidth>0</IDWidth> + <MultiRegion>false</MultiRegion> + <ARUSEREnabled>false</ARUSEREnabled> + <AWUSEREnabled>false</AWUSEREnabled> + <RUSEREnabled>false</RUSEREnabled> + <WUSEREnabled>false</WUSEREnabled> + <BUSEREnabled>false</BUSEREnabled> + <TrustZoneMaster>secure</TrustZoneMaster> + <ReadIssuing>1</ReadIssuing> + <WriteIssuing>1</WriteIssuing> + <TotalIssuing>1</TotalIssuing> + <MultiPorted>false</MultiPorted> + <IDWidthReduction>true</IDWidthReduction> + <OutputSignals>false</OutputSignals> + <VNExternal>false</VNExternal> + </AXI4MasterProtocol> + <GeographicDomainRef>gd0</GeographicDomainRef> + <ClockRef>clk0</ClockRef> + </MasterInterface> + </Interfaces> + <MemoryMaps> + <MemoryMap> + <Name>mm0</Name> + <MemoryMapSource> + <InterfaceRef>A53</InterfaceRef> + </MemoryMapSource> + <MappedBlock> + <InterfaceRef>ROM</InterfaceRef> + <Offset>0</Offset> + <Range>65536</Range> + <Visibility>true</Visibility> + </MappedBlock> + <MappedBlock> + <InterfaceRef>FLASH</InterfaceRef> + <Offset>4194304</Offset> + <Range>4194304</Range> + <Visibility>true</Visibility> + </MappedBlock> + <MappedBlock> + <InterfaceRef>RAM</InterfaceRef> + <Offset>8388608</Offset> + <Range>65536</Range> + <Visibility>true</Visibility> + </MappedBlock> + <MappedBlock> + <InterfaceRef>PERIPHERAL</InterfaceRef> + <Offset>1073741824</Offset> + <Range>536870912</Range> + <Visibility>true</Visibility> + </MappedBlock> + <MappedBlock> + <InterfaceRef>DRAM</InterfaceRef> + <Offset>2147483648</Offset> + <Range>2147483648</Range> + <Visibility>true</Visibility> + </MappedBlock> + <MappedBlock> + <InterfaceRef>FLASH_CTRL</InterfaceRef> + <Offset>16777216</Offset> + <Range>65536</Range> + <Visibility>true</Visibility> + </MappedBlock> + <MappedBlock> + <InterfaceRef>DEBUG</InterfaceRef> + <Offset>1610612736</Offset> + <Range>536870912</Range> + <Visibility>true</Visibility> + </MappedBlock> + <MappedBlock> + <InterfaceRef>GIC</InterfaceRef> + <Offset>17825792</Offset> + <Range>32768</Range> + <Visibility>true</Visibility> + </MappedBlock> + </MemoryMap> + </MemoryMaps> + <Paths> + <Path> + <Source> + <InterfaceRef>A53</InterfaceRef> + </Source> + <Targets> + <Target> + <InterfaceRef>ROM</InterfaceRef> + </Target> + <Target> + <InterfaceRef>FLASH</InterfaceRef> + </Target> + <Target> + <InterfaceRef>RAM</InterfaceRef> + </Target> + <Target> + <InterfaceRef>PERIPHERAL</InterfaceRef> + </Target> + <Target> + <InterfaceRef>DRAM</InterfaceRef> + </Target> + <Target> + <InterfaceRef>FLASH_CTRL</InterfaceRef> + </Target> + <Target> + 
<InterfaceRef>DEBUG</InterfaceRef> + </Target> + <Target> + <InterfaceRef>GIC</InterfaceRef> + </Target> + </Targets> + </Path> + </Paths> + <VirtualNetworks/> + </Specification> + <Architecture> + <NICConfigFile><periph> + <product_version_info major_group="bu" major_revision="1" major_version="00" minor_code="50000" minor_revision="2" minor_version="0" part_quality="rel" product_code="nic400" /> + <validator_version_info major_revision="22" minor_revision="1" /> + <global> + <address0x0 def="true">bottom</address0x0> + <aruser_width>0</aruser_width> + <awuser_width>0</awuser_width> + <buser_width>0</buser_width> + <cc_type>async</cc_type> + <default_protocol>axi4</default_protocol> + <dpe_glb_enable def="true">false</dpe_glb_enable> + <dpe_status>false</dpe_status> + <dpe_width def="true">5</dpe_width> + <gen_caps>true</gen_caps> + <hcg_en>false</hcg_en> + <license_status>unlicensed_nic</license_status> + <periph_id3 def="true">0</periph_id3> + <pl_id_width>6</pl_id_width> + <qos_status>false</qos_status> + <rsb_arch_central_ring>false</rsb_arch_central_ring> + <ruser_width>0</ruser_width> + <sas_visible def="true">false</sas_visible> + <start_iid>0</start_iid> + <taxonomy>masterslave</taxonomy> + <thin_links_status def="true">false</thin_links_status> + <uppercase_ext_sig>true</uppercase_ext_sig> + <virtual_networks /> + <virtual_networks_status>false</virtual_networks_status> + <wuser_width>0</wuser_width> + </global> + <clocks> + <domain freq="100">clk0</domain> + </clocks> + <asib> + <address_ranges> + <name>mm0</name> + <range> + <addr_max>0xFFFF</addr_max> + <addr_min>0x0</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>ROM</target> + </remap> + </range> + <range> + <addr_max>0x7FFFFF</addr_max> + <addr_min>0x400000</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>FLASH</target> + </remap> + </range> + <range> + <addr_max>0x80FFFF</addr_max> + <addr_min>0x800000</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>RAM</target> + </remap> + </range> + <range> + <addr_max>0x5FFFFFFF</addr_max> + <addr_min>0x40000000</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>PERIPHERAL</target> + </remap> + </range> + <range> + <addr_max>0xFFFFFFFF</addr_max> + <addr_min>0x80000000</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>DRAM</target> + </remap> + </range> + <range> + <addr_max>0x100FFFF</addr_max> + <addr_min>0x1000000</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>FLASH_CTRL</target> + </remap> + </range> + <range> + <addr_max>0x7FFFFFFF</addr_max> + <addr_min>0x60000000</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>DEBUG</target> + </remap> + </range> + <range> + <addr_max>0x1107FFF</addr_max> + <addr_min>0x1100000</addr_min> + <remap> + <bit>default</bit> + <present>true</present> + <region>0</region> + <target>GIC</target> + </remap> + </range> + </address_ranges> + <apb_config>false</apb_config> + <apb_slave_no def="true">2</apb_slave_no> + <cds>slaveperid</cds> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + <clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <master_if_data_width>128</master_if_data_width> + <multi_ported>false</multi_ported> + 
<multi_region>false</multi_region> + <name>A53</name> + <protocol>axi4</protocol> + <qos_config> + <hard>disable</hard> + <lqv>disable</lqv> + <pot>disable</pot> + </qos_config> + <qv> + <type>fixed</type> + <value>0</value> + </qv> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>aw</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>w</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>ar</name> + <type>rev</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>aw</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>ar</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>r</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>b</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <slave_if_addr_width>44</slave_if_addr_width> + <slave_if_data_width>128</slave_if_data_width> + <token_prerequest def="true">false</token_prerequest> + <token_prerequest_bridge def="true">false</token_prerequest_bridge> + <trustzone>sec</trustzone> + <vid_width>6</vid_width> + <vn_external>none</vn_external> + <vn_external_bridge>none</vn_external_bridge> + <x>0</x> + <y>20</y> + <master_if_port_name>A53_m</master_if_port_name> + <slave_if_port_name>A53_s</slave_if_port_name> + </asib> + <amib> + <apb_config>false</apb_config> + <apb_slave_no>65</apb_slave_no> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + <clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <compress_id>true</compress_id> + <dest_type>peripheral</dest_type> + <master_if_addr_width>32</master_if_addr_width> + <master_if_data_width>64</master_if_data_width> + <multi_ported>false</multi_ported> + <multi_region>false</multi_region> + <name>ROM</name> + <protocol>axi4</protocol> + <qv_out>false</qv_out> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>w</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>b</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + 
<location>master_port</location> + <name>r</name> + <type>rev</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>aw</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>ar</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>r</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>b</name> + <type>fifo</type> + </reg> + <slave_if_data_width>64</slave_if_data_width> + <token_prerequest def="true">false</token_prerequest> + <token_prerequest_bridge def="true">false</token_prerequest_bridge> + <trustzone>nsec</trustzone> + <vn_external>none</vn_external> + <vn_external_bridge>none</vn_external_bridge> + <x>0</x> + <y>20</y> + <master_if_port_name>ROM_m</master_if_port_name> + <slave_if_port_name>ROM_s</slave_if_port_name> + </amib> + <amib> + <apb_config>false</apb_config> + <apb_slave_no>64</apb_slave_no> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + <clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <compress_id def="true">false</compress_id> + <dest_type>peripheral</dest_type> + <master_if_addr_width>32</master_if_addr_width> + <master_if_data_width>32</master_if_data_width> + <multi_ported>false</multi_ported> + <multi_region>false</multi_region> + <name>FLASH</name> + <protocol>ahb_m</protocol> + <qv_out>false</qv_out> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>aw</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>ar</name> + <type>rev</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>a</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + 
<location>master_port</location> + <name>d</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>a</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>d</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <slave_if_data_width>128</slave_if_data_width> + <token_prerequest def="true">false</token_prerequest> + <token_prerequest_bridge def="true">false</token_prerequest_bridge> + <trustzone>nsec</trustzone> + <vn_external>none</vn_external> + <vn_external_bridge>none</vn_external_bridge> + <x>0</x> + <y>40</y> + <master_if_port_name>FLASH_m</master_if_port_name> + <slave_if_port_name>FLASH_s</slave_if_port_name> + </amib> + <amib> + <apb_config>false</apb_config> + <apb_slave_no>63</apb_slave_no> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + <clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <compress_id>true</compress_id> + <dest_type>peripheral</dest_type> + <master_if_addr_width>32</master_if_addr_width> + <master_if_data_width>64</master_if_data_width> + <multi_ported>false</multi_ported> + <multi_region>false</multi_region> + <name>RAM</name> + <protocol>axi4</protocol> + <qv_out>false</qv_out> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>w</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>b</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>r</name> + <type>rev</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>aw</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>ar</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>r</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> 
+ <impl def="true">absent</impl> + <location>boundary</location> + <name>b</name> + <type>fifo</type> + </reg> + <slave_if_data_width>64</slave_if_data_width> + <token_prerequest def="true">false</token_prerequest> + <token_prerequest_bridge def="true">false</token_prerequest_bridge> + <trustzone>nsec</trustzone> + <vn_external>none</vn_external> + <vn_external_bridge>none</vn_external_bridge> + <x>0</x> + <y>60</y> + <master_if_port_name>RAM_m</master_if_port_name> + <slave_if_port_name>RAM_s</slave_if_port_name> + </amib> + <amib> + <apb_config>false</apb_config> + <apb_slave_no>62</apb_slave_no> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + <clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <compress_id>true</compress_id> + <dest_type>peripheral</dest_type> + <master_if_addr_width>32</master_if_addr_width> + <master_if_data_width>64</master_if_data_width> + <multi_ported>false</multi_ported> + <multi_region>false</multi_region> + <name>DRAM</name> + <protocol>axi4</protocol> + <qv_out>false</qv_out> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>w</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>b</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>r</name> + <type>rev</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>aw</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>ar</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>r</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>b</name> + <type>fifo</type> + </reg> + <slave_if_data_width>64</slave_if_data_width> + <token_prerequest def="true">false</token_prerequest> + <token_prerequest_bridge def="true">false</token_prerequest_bridge> + <trustzone>nsec</trustzone> + <vn_external>none</vn_external> + <vn_external_bridge>none</vn_external_bridge> + <x>0</x> + <y>80</y> + <master_if_port_name>DRAM_m</master_if_port_name> + <slave_if_port_name>DRAM_s</slave_if_port_name> + </amib> + <amib> + 
<apb_config>false</apb_config> + <apb_slave_no>61</apb_slave_no> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + <clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <compress_id>true</compress_id> + <dest_type>peripheral</dest_type> + <master_if_addr_width>32</master_if_addr_width> + <master_if_data_width>32</master_if_data_width> + <multi_ported>false</multi_ported> + <multi_region>false</multi_region> + <name>GIC</name> + <protocol>axi4</protocol> + <qv_out>false</qv_out> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>w</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>b</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>master_port</location> + <name>r</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>aw</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>ar</name> + <type>rev</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>aw</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>ar</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>r</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>b</name> + <type>fifo</type> + </reg> + <slave_if_data_width>128</slave_if_data_width> + <token_prerequest def="true">false</token_prerequest> + <token_prerequest_bridge def="true">false</token_prerequest_bridge> + <trustzone>sec</trustzone> + <vn_external>none</vn_external> + <vn_external_bridge>none</vn_external_bridge> + <x>0</x> + <y>100</y> + <master_if_port_name>GIC_m</master_if_port_name> + <slave_if_port_name>GIC_s</slave_if_port_name> + </amib> + <amib> + <apb_config>false</apb_config> + <apb_port> + <clock_domain>clk0</clock_domain> + <name>PERIPHERAL</name> + <trustzone>nsec</trustzone> + <x>0</x> + <y>0</y> + </apb_port> + <apb_port> + <clock_domain>clk0</clock_domain> + <name>FLASH_CTRL</name> + <trustzone>nsec</trustzone> + <x>0</x> + <y>0</y> + </apb_port> + <apb_port> + <clock_domain>clk0</clock_domain> + <name>DEBUG</name> + <trustzone>nsec</trustzone> + <x>0</x> + <y>0</y> + </apb_port> + <apb_slave_no>60</apb_slave_no> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + 
<clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <compress_id def="true">false</compress_id> + <dest_type>peripheral</dest_type> + <master_if_addr_width>32</master_if_addr_width> + <master_if_data_width>32</master_if_data_width> + <multi_ported>false</multi_ported> + <multi_region>true</multi_region> + <name>apb_group0</name> + <protocol>apb</protocol> + <qv_out>false</qv_out> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>aw</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>ar</name> + <type>rev</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>a</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>d</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>a</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>d</name> + <type>fifo</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <slave_if_data_width>128</slave_if_data_width> + <token_prerequest def="true">false</token_prerequest> + <token_prerequest_bridge def="true">false</token_prerequest_bridge> + <trustzone>nsec</trustzone> + <vn_external def="true">none</vn_external> + <vn_external_bridge def="true">none</vn_external_bridge> + <x>0</x> + <y>120</y> + <master_if_port_name>PERIPHERAL,FLASH_CTRL,DEBUG</master_if_port_name> + <slave_if_port_name>apb_group0_s</slave_if_port_name> + </amib> + <inter> + <clock_domain>clk0</clock_domain> + <data_width>64</data_width> + <expanded>false</expanded> + <height>40</height> + <impl>mlayer</impl> + <master_if> + <name>axi_m_0</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>63</y> + </master_if> + <master_if> + <name>axi_m_1</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>83</y> + </master_if> + <master_if> + <name>axi_m_2</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>103</y> + </master_if> + <name>bm0</name> + <protocol>axi4</protocol> + <slave_if> + <name>axi_s_0</name> + <x>0</x> + <y>63</y> + </slave_if> + <sparse> + <cds>slaveperid</cds> + <sas def="true">false</sas> + <slave_if_port>axi_s_0</slave_if_port> + <master_if_port> + <name>axi_m_0</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type 
def="true">full</type> + </reg> + </master_if_port> + <master_if_port> + <name>axi_m_1</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type def="true">full</type> + </reg> + </master_if_port> + <master_if_port> + <name>axi_m_2</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type def="true">full</type> + </reg> + </master_if_port> + </sparse> + <type>busmatrix</type> + <width>0</width> + <x>500</x> + <y>45</y> + <master_if_port_name>axi_m_0,axi_m_1,axi_m_2</master_if_port_name> + <slave_if_port_name>axi_s_0</slave_if_port_name> + </inter> + <inter> + <clock_domain>clk0</clock_domain> + <data_width>128</data_width> + <expanded>false</expanded> + <height>80</height> + <impl>mlayer</impl> + <master_if> + <name>axi_m_0</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>108</y> + </master_if> + <master_if> + <name>axi_m_1</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>128</y> + </master_if> + <master_if> + <name>axi_m_2</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>148</y> + </master_if> + <master_if> + <name>axi_m_3</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>168</y> + </master_if> + <master_if> + <name>axi_m_4</name> + <post_arb_reg>absent</post_arb_reg> + <x>0</x> + <y>188</y> + </master_if> + <name>bm1</name> + <protocol>axi4</protocol> + <slave_if> + <name>axi_s_0</name> + <x>0</x> + <y>108</y> + </slave_if> + <sparse> + <cds>slaveperid</cds> + <sas def="true">false</sas> + <slave_if_port>axi_s_0</slave_if_port> + <master_if_port> + <name>axi_m_0</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type def="true">full</type> + </reg> + </master_if_port> + <master_if_port> + <name>axi_m_1</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type def="true">full</type> + </reg> + </master_if_port> + <master_if_port> + <name>axi_m_2</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type 
def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type def="true">full</type> + </reg> + </master_if_port> + <master_if_port> + <name>axi_m_3</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type def="true">full</type> + </reg> + </master_if_port> + <master_if_port> + <name>axi_m_4</name> + <reg> + <impl def="true">absent</impl> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <name>b</name> + <type def="true">full</type> + </reg> + </master_if_port> + </sparse> + <type>busmatrix</type> + <width>0</width> + <x>500</x> + <y>90</y> + <master_if_port_name>axi_m_0,axi_m_1,axi_m_2,axi_m_3,axi_m_4</master_if_port_name> + <slave_if_port_name>axi_s_0</slave_if_port_name> + </inter> + <inter> + <apb_config def="true">false</apb_config> + <apb_slave_no def="true">2</apb_slave_no> + <clock_boundary>none</clock_boundary> + <clock_domain_name_master_if>clk0</clock_domain_name_master_if> + <clock_domain_name_slave_if>clk0</clock_domain_name_slave_if> + <master_if> + <name>ib2_m</name> + <x>0</x> + <y>0</y> + </master_if> + <master_if_data_width>64</master_if_data_width> + <name>ib2</name> + <qos_config> + <hard>disable</hard> + <lqv>disable</lqv> + <pot>disable</pot> + </qos_config> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>aw</name> + <type>rev</type> + </reg> + <reg> + <impl>present</impl> + <location>slave_port</location> + <name>ar</name> + <type>rev</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>aw</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>aw</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>ar</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>ar</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>r</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>r</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + 
<location>slave_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>w</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>w</name> + <type def="true">full</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>slave_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <reg> + <depth def="true">2</depth> + <impl def="true">absent</impl> + <location>boundary</location> + <name>b</name> + <type>fifo</type> + </reg> + <reg> + <impl def="true">absent</impl> + <location>master_port</location> + <name>b</name> + <type def="true">full</type> + </reg> + <slave_if> + <name>ib2_s</name> + <x>0</x> + <y>0</y> + </slave_if> + <slave_if_data_width>128</slave_if_data_width> + <type>ib</type> + <x>0</x> + <y>0</y> + <master_if_port_name>ib2_m</master_if_port_name> + <slave_if_port_name>ib2_s</slave_if_port_name> + </inter> + <inter> + <name>ds_3</name> + <slave_if> + <name>axi_s_0</name> + <x>0</x> + <y>0</y> + </slave_if> + <type>default_slave</type> + <x>500</x> + <y>500</y> + <master_if_port_name /> + <slave_if_port_name>axi_s_0</slave_if_port_name> + </inter> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>A53</dest> + <dest_port>A53_s</dest_port> + <lock>false</lock> + <out_reads>4</out_reads> + <out_trans>8</out_trans> + <out_writes>4</out_writes> + <protocol>axi4</protocol> + <ruser>false</ruser> + <src>external</src> + <src_port>A53</src_port> + <wuser>false</wuser> + </connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>ROM</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <ruser>false</ruser> + <src>ROM</src> + <src_port>ROM_m</src_port> + <wuser>false</wuser> + </connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>FLASH</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>ahb_m</protocol> + <ruser>false</ruser> + <src>FLASH</src> + <src_port>FLASH_m</src_port> + <wuser>false</wuser> + </connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>RAM</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <ruser>false</ruser> + <src>RAM</src> + <src_port>RAM_m</src_port> + <wuser>false</wuser> + </connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>DRAM</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <ruser>false</ruser> + <src>DRAM</src> + <src_port>DRAM_m</src_port> + <wuser>false</wuser> + </connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>GIC</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <ruser>false</ruser> + <src>GIC</src> + <src_port>GIC_m</src_port> + <wuser>false</wuser> + 
</connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>PERIPHERAL</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>apb3</protocol> + <ruser>false</ruser> + <src>apb_group0</src> + <src_port>PERIPHERAL</src_port> + <wuser>false</wuser> + </connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>FLASH_CTRL</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>apb4</protocol> + <ruser>false</ruser> + <src>apb_group0</src> + <src_port>FLASH_CTRL</src_port> + <wuser>false</wuser> + </connect> + <connect> + <aruser>false</aruser> + <awuser>false</awuser> + <buser>false</buser> + <dest>external</dest> + <dest_port>DEBUG</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>apb3</protocol> + <ruser>false</ruser> + <src>apb_group0</src> + <src_port>DEBUG</src_port> + <wuser>false</wuser> + </connect> + <connect> + <dest>ROM</dest> + <dest_port>ROM_s</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <src>bm0</src> + <src_port>axi_m_0</src_port> + </connect> + <connect> + <dest>DRAM</dest> + <dest_port>DRAM_s</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <src>bm0</src> + <src_port>axi_m_1</src_port> + </connect> + <connect> + <dest>RAM</dest> + <dest_port>RAM_s</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>1</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <src>bm0</src> + <src_port>axi_m_2</src_port> + </connect> + <connect> + <dest>bm1</dest> + <dest_port>axi_s_0</dest_port> + <lock>false</lock> + <out_reads>4</out_reads> + <out_trans>8</out_trans> + <out_writes>4</out_writes> + <protocol>axi4</protocol> + <src>A53</src> + <src_port>A53_m</src_port> + </connect> + <connect> + <dest>FLASH</dest> + <dest_port>FLASH_s</dest_port> + <lock>false</lock> + <out_reads def="true">2</out_reads> + <out_trans def="true">4</out_trans> + <out_writes def="true">2</out_writes> + <protocol>axi4</protocol> + <src>bm1</src> + <src_port>axi_m_0</src_port> + </connect> + <connect> + <dest>apb_group0</dest> + <dest_port>apb_group0_s</dest_port> + <lock>false</lock> + <out_reads def="true">2</out_reads> + <out_trans def="true">2</out_trans> + <out_writes def="true">2</out_writes> + <protocol>axi4</protocol> + <src>bm1</src> + <src_port>axi_m_1</src_port> + </connect> + <connect> + <dest>GIC</dest> + <dest_port>GIC_s</dest_port> + <lock>false</lock> + <out_reads def="true">1</out_reads> + <out_trans def="true">1</out_trans> + <out_writes def="true">1</out_writes> + <protocol>axi4</protocol> + <src>bm1</src> + <src_port>axi_m_2</src_port> + </connect> + <connect> + <dest>ib2</dest> + <dest_port>ib2_s</dest_port> + <lock>false</lock> + <out_reads def="true">3</out_reads> + <out_trans>6</out_trans> + <out_writes def="true">3</out_writes> + <protocol>axi4</protocol> + <src>bm1</src> + <src_port>axi_m_3</src_port> + </connect> + <connect> + <dest>bm0</dest> + <dest_port>axi_s_0</dest_port> + <lock>false</lock> + <out_reads def="true">3</out_reads> + <out_trans>6</out_trans> + <out_writes def="true">3</out_writes> 
+ <protocol>axi4</protocol> + <src>ib2</src> + <src_port>ib2_m</src_port> + </connect> + <connect> + <dest>ds_3</dest> + <dest_port>axi_s_0</dest_port> + <lock>false</lock> + <out_reads>1</out_reads> + <out_trans>2</out_trans> + <out_writes>1</out_writes> + <protocol>axi4</protocol> + <src>bm1</src> + <src_port>axi_m_4</src_port> + </connect> + <architecture> + <link> + <slave_if> + <name>A53</name> + <master_if>apb_group0</master_if> + <master_if>DRAM</master_if> + <master_if>FLASH</master_if> + <master_if>DEBUG<parent>apb_group0</parent> + </master_if> + <master_if>PERIPHERAL<parent>apb_group0</parent> + </master_if> + <master_if>FLASH_CTRL<parent>apb_group0</parent> + </master_if> + <master_if>GIC</master_if> + <master_if>RAM</master_if> + <master_if>ROM</master_if> + </slave_if> + </link> + </architecture> +</periph> +</NICConfigFile> + </Architecture> + <Deliverables> + <IPXACT/> + <RTL/> + <TestBench/> + <Reports/> + </Deliverables> +</ConfiguredComponent> \ No newline at end of file diff --git a/socrates/sie300/sie300_axi5_sram_ctrl_1/sie300_axi5_sram_ctrl_1.xml b/socrates/sie300/sie300_axi5_sram_ctrl_1/sie300_axi5_sram_ctrl_1.xml new file mode 100644 index 0000000000000000000000000000000000000000..1e38efdc21855bfbd3a4046139a095c2787db215 --- /dev/null +++ b/socrates/sie300/sie300_axi5_sram_ctrl_1/sie300_axi5_sram_ctrl_1.xml @@ -0,0 +1,111 @@ +<?xml version='1.0' encoding='utf-8'?> +<ConfiguredComponent version="r1p0"> + <Name>sie300_axi5_sram_ctrl_1</Name> + <Suffix>1</Suffix> + <ConfigurationGroupName>sie300</ConfigurationGroupName> + <ConfigurableComponentRef> + <Vendor>arm.com</Vendor> + <Library>CoreLink</Library> + <Name>sie300_axi5_sram_ctrl</Name> + <Version>r1p0_0</Version> + </ConfigurableComponentRef> + <Specification> + <Parameters> + <Parameter> + <Name>ADDR_WIDTH</Name> + <Value>20</Value> + </Parameter> + <Parameter> + <Name>DATA_WIDTH</Name> + <Value>64</Value> + </Parameter> + <Parameter> + <Name>ID_WIDTH</Name> + <Value>6</Value> + </Parameter> + <Parameter> + <Name>QCLK_SYNC_EN</Name> + <Value>1</Value> + </Parameter> + <Parameter> + <Name>QPWR_SYNC_EN</Name> + <Value>1</Value> + </Parameter> + <Parameter> + <Name>QEXT_SYNC_EN</Name> + <Value>1</Value> + </Parameter> + <Parameter> + <Name>EXCLUSIVE_MONITORS</Name> + <Value>2</Value> + </Parameter> + <Parameter> + <Name>AR_BUF_SIZE</Name> + <Value>1</Value> + </Parameter> + <Parameter> + <Name>AW_BUF_SIZE</Name> + <Value>2</Value> + </Parameter> + <Parameter> + <Name>W_BUF_SIZE</Name> + <Value>8</Value> + </Parameter> + <Parameter> + <Name>AXI5_POISON_EN</Name> + <Value>1</Value> + </Parameter> + <Parameter> + <Name>REGISTER_AXI_AR</Name> + <Value>BYPASS</Value> + </Parameter> + <Parameter> + <Name>REGISTER_AXI_R</Name> + <Value>BYPASS</Value> + </Parameter> + <Parameter> + <Name>STRB_WIDTH</Name> + <Value>8</Value> + </Parameter> + <Parameter> + <Name>POIS_WIDTH</Name> + <Value>1</Value> + </Parameter> + <Parameter> + <Name>AW_CNTR_WIDTH</Name> + <Value>5</Value> + </Parameter> + <Parameter> + <Name>MDAT_WIDTH</Name> + <Value>1</Value> + </Parameter> + <Parameter> + <Name>MSTR_WIDTH</Name> + <Value>1</Value> + </Parameter> + </Parameters> + <Domains> + <VoltageDomains> + <VoltageDomain> + <Name>vd0</Name> + <UID>VD-vd0</UID> + </VoltageDomain> + </VoltageDomains> + <PowerDomains> + <PowerDomain> + <Name>pd0</Name> + <UID>PD-pd0</UID> + <Type>AlwaysOn</Type> + <VoltageDomainRef>VD-vd0</VoltageDomainRef> + </PowerDomain> + </PowerDomains> + <ClockDomains> + <ClockDomain> + <Name>clk0</Name> + <UID>CD-clk0</UID> + 
<PowerDomainRef>PD-pd0</PowerDomainRef> + </ClockDomain> + </ClockDomains> + </Domains> + </Specification> +</ConfiguredComponent> diff --git a/software/lib/common/Makefile.c_host b/software/lib/common/Makefile.c_host new file mode 100644 index 0000000000000000000000000000000000000000..6e6ef35d3b56c48f7a995d3c857cdd01d69dbea0 --- /dev/null +++ b/software/lib/common/Makefile.c_host @@ -0,0 +1,229 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. +## +## Release Information : SSE710-r0p0-00rel0 +## +##------------------------------------------------------------------------------ +## Purpose : C test Makefile +##------------------------------------------------------------------------------ + +SW_LIB_PATH := $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/sw_lib +LOCAL_SW_LIB_PATH := $(SW_LIB_PATH)/host +BUILD_DIR_LIB := $(BUILD_DIR)/obj/lib + +# ------------------------------------------------------------------------------ +# Default build target +# ------------------------------------------------------------------------------ + +OUTPUT_FILES = $(BUILD_DIR)/app_ram.v8-a.hex \ + $(BUILD_DIR)/app_flash.v8-a.hex + +all: disp build $(OUTPUT_FILES) +disp: + @echo -e "\n------------------- Executing make target $@ -------------------" + @echo "Running from $(CURDIR)" + @echo "TEST_PATH = $(TEST_PATH)" + @echo "TEST_ID = $(TEST_ID)" + @echo "SW_LIB_PATH = $(SW_LIB_PATH)" + @echo "BUILD_DIR = $(BUILD_DIR)" + @echo "AARCH64 = $(AARCH64)" + +build: + mkdir -p $(BUILD_DIR) + +clean: + @echo "Cleaning all build files" + rm -rf $(BUILD_DIR) + +all_bootloader: disp build $(BUILD_DIR)/bootloader.hex + +# ------------------------------------------------------------------------------ +# Tool setup +# ------------------------------------------------------------------------------ +EM = $(MEM_IMG_PATH)/cg071_elf2mem +FE = fromelf + +ifeq ($(COMPILE_GCC), 0) + AS = armclang + CC = armclang + LD = armlink +else + ifeq ($(AARCH64), 1) + AS = aarch64-none-elf-gcc + CC = aarch64-none-elf-gcc + LD = aarch64-none-elf-gcc + OD = aarch64-none-elf-objdump + else + AS = arm-none-eabi-gcc + CC = arm-none-eabi-gcc + LD = arm-none-eabi-gcc + OD = arm-none-eabi-objdump + endif +endif + +# ------------------------------------------------------------------------------ +# Build Host processor executable image +# ------------------------------------------------------------------------------ +BOOT_CODE ?=0 +# List of object files +ifeq ($(BOOT_CODE), 1) + APP_SRC += $(SW_LIB_PATH)/apps/src/system.c + APP_SRC += $(SW_LIB_PATH)/apps/src/boot.s + APP_SRC += $(SW_LIB_PATH)/apps/src/platform.c + APP_SRC += $(SW_LIB_PATH)/apps/src/cpu_asm_codes.c + APP_SRC += $(SW_LIB_PATH)/apps/src/ipc.s + APP_SRC += $(SW_LIB_PATH)/apps/src/irq.s + APP_SRC += $(SW_LIB_PATH)/apps/src/vect_64.s + APP_SRC += $(SW_LIB_PATH)/apps/src/page_table.s +endif +APP_SRC += $(SW_LIB_PATH)/apps/src/system.c +APP_SRC += $(SW_LIB_PATH)/apps/src/boot.s +APP_SRC += 
$(SW_LIB_PATH)/apps/src/platform.c +APP_SRC += $(SW_LIB_PATH)/apps/src/cpu_asm_codes.c +APP_SRC += $(SW_LIB_PATH)/apps/src/ipc.s +APP_SRC += $(SW_LIB_PATH)/apps/src/irq.s +APP_SRC += $(SW_LIB_PATH)/apps/src/vect_64.s +APP_SRC += $(SW_LIB_PATH)/apps/src/page_table.s + +# APP_SRC += $(SW_LIB_PATH)/devices/src/dma_350_command_lib.c +# APP_SRC += $(SW_LIB_PATH)/devices/src/pl011_uart.c +# APP_SRC += $(SW_LIB_PATH)/devices/src/intr_rtr_1.c +# APP_SRC += $(SW_LIB_PATH)/devices/src/firewall_1.c +APP_SRC += $(SW_LIB_PATH)/devices/src/gic400.c +# APP_SRC += $(SW_LIB_PATH)/devices/src/watchdog_generic.c +APP_SRC += $(SW_LIB_PATH)/host/src/sys_utils.c +# APP_SRC += $(SW_LIB_PATH)/devices/src/qspi_flash.c +APP_SRC += $(SW_LIB_PATH)/common/src/system_level_functions.c +APP_SRC += $(SW_LIB_PATH)/devices/src/uart_stdout.c +CPU := -mcpu=cortex-a53 +CPU := $(CPU)+nocrypto + +# Cortex-A class ASM flags +APP_AS_FLAGS += -Os -x assembler-with-cpp -mlittle-endian $(CPU) -DAARCH_V8 +ifeq ($(COMPILE_GCC), 0) + APP_AS_FLAGS += -mno-unaligned-access + ifeq ($(AARCH64), 1) + APP_AS_FLAGS += --target=aarch64-arm-none-eabi -DAARCH_V8_64 -D__V8_ARCH__ -D__ARMCC_VERSION__ + else + APP_AS_FLAGS += --target=arm-arm-none-eabi -mfpu=neon -marm -DARM_V8_32BIT + endif +else + APP_AS_FLAGS += -c -std=c99 -D__GCC_COMPILER__ + ifeq ($(AARCH64), 1) + APP_AS_FLAGS += -DARM_V8_64 -D__V8_ARCH__ + else + APP_AS_FLAGS += -DARM_V8_32BIT + endif +endif + +# Cortex-A class ASM include files and directories +APP_AS_INC_DIR += $(LOCAL_SW_LIB_PATH)/include $(SW_LIB_PATH)/common/include $(SW_LIB_PATH)/devices/include/ +ifeq ($(AARCH64), 1) + APP_AS_INC_DIR += $(SW_LIB_PATH)/apps/include/ +else + APP_AS_INC_DIR += $(SW_LIB_PATH)/apps_v8_32/include/ +endif + +# Cortex-A class C flags +APP_C_FLAGS += -Os -mlittle-endian $(CPU) -DAARCH_V8 -DSYNC_CPU_HOST -DTB_UART +ifeq ($(COMPILE_GCC), 0) + APP_C_FLAGS += -mno-unaligned-access + ifeq ($(AARCH64), 1) + APP_C_FLAGS += --target=aarch64-arm-none-eabi -DAARCH_V8_64 -D__V8_ARCH__ + else + APP_C_FLAGS += --target=arm-arm-none-eabi -mfpu=neon -marm -DARM_V8_32BIT + endif +else + APP_C_FLAGS += -c --specs=nosys.specs -std=c99 -D__GCC_COMPILER__ + ifeq ($(AARCH64), 1) + APP_C_FLAGS += -DARM_V8_64 -D__V8_ARCH__ + else + APP_C_FLAGS += -DARM_V8_32BIT + endif +endif + +# Cortex-A class C include files and directories +APP_C_INC_DIR += $(LOCAL_SW_LIB_PATH)/include $(SW_LIB_PATH)/common/include $(SW_LIB_PATH)/devices/include/ +ifeq ($(AARCH64), 1) + APP_C_INC_DIR += $(SW_LIB_PATH)/apps/include/ +else + APP_C_INC_DIR += $(SW_LIB_PATH)/apps_v8_32/include/ +endif + +# Cortex-A class linker scripts / scatter files +APP_SCAT ?= default + +# Cortex-A class linker flags +ifeq ($(COMPILE_GCC), 0) + APP_LD_FLAGS += --no_remove --map --show_parent_lib --show_full_path --list=function_list.v8-a.txt + ifeq ($(AARCH64), 1) + APP_LD_FLAGS += --cpu=8-A.64 + else + APP_LD_FLAGS += --cpu=8.2-A.32 + endif + APP_SCAT_LOAD := --scatter $(LOCAL_SW_LIB_PATH)/scat/$(APP_SCAT).scat +else + APP_LD_FLAGS += --specs=nosys.specs -T $(LOCAL_SW_LIB_PATH)/scat/$(APP_SCAT).ld -static -lc -nostartfiles +endif + +# Auto variable +APP_OBJ = $(addprefix $(BUILD_DIR_LIB)/,$(addsuffix .v8-a.o,$(APP_SRC))) +APP_ASINCLUDES = $(filter .hs, $(APP_AS_INC_DIR)) +APP_CINCLUDES = $(filter .h, $(APP_C_INC_DIR)) + + +# Compile Cortex-A class ASM code +$(BUILD_DIR_LIB)/%.s.v8-a.o: %.s ${APP_ASINCLUDES} + @echo -e "\n------------------- Executing make target $(@F) -------------------" + mkdir -p `dirname $@` + $(AS) $(APP_AS_FLAGS) $(APP_AS_INC_DIR:%=-I%) 
-c $< -o $@ + +# Compile Cortex-A class C code +$(BUILD_DIR_LIB)/%.c.v8-a.o: %.c ${APP_CINCLUDES} + @echo -e "\n------------------- Executing make target $(@F) -------------------" + mkdir -p `dirname $@` + $(CC) $(APP_C_FLAGS) $(APP_C_INC_DIR:%=-I%) -c $< -o $@ + +# Linking Cortex-A class object files +ifeq ($(COMPILE_GCC), 0) +$(BUILD_DIR)/app.v8-a.elf: $(APP_OBJ) + @echo -e "\n------------------- Executing make target $(@F) -------------------" + $(LD) --debug --verbose $(APP_LD_FLAGS) -o $@ $(filter %.o, $(^)) $(APP_SCAT_LOAD) + $(FE) -c --output $@.disass $@ +else +$(BUILD_DIR)/app.v8-a.elf: $(APP_OBJ) + @echo -e "\n------------------- Executing make target $(@F) -------------------" + $(LD) --debug --verbose $(APP_LD_FLAGS) -o $@ $(filter %.o, $(^)) + $(OD) -a -x -d -w -S $@ > $@.disass +endif + +# ------------------------------------------------------------------------------ +# Build memory device images +# ------------------------------------------------------------------------------ + +$(BUILD_DIR)/app_ram.v8-a.hex: $(BUILD_DIR)/app.v8-a.elf + @echo -e "\n------------------- Executing make target $(@F) -------------------" + $(EM) window,0x00800000,0x600000,0x00800000:vmem,64,1 $^ --compact -v 3 --output $@ + +$(BUILD_DIR)/app_flash.v8-a.hex: $(BUILD_DIR)/app.v8-a.elf + @echo -e "\n------------------- Executing make target $(@F) -------------------" + fromelf --vhx -a --output $@ $^ + # $(EM) window,0x00000000,0x400000,0x00000000:vmem,8,1 $^ --compact --output $@ + +$(BUILD_DIR)/bootloader.hex: $(BUILD_DIR)/app.v8-a.elf + @echo -e "\n------------------- Executing make target $(@F) -------------------" + $(EM) window,0x00000000,0x400000,0x00000000:vmem,64,1 $^ --compact --output $@ + +# ------------------------------------------------------------------------------ +# End of Makefile +# ------------------------------------------------------------------------------ diff --git a/software/lib/common/Makefile.sim b/software/lib/common/Makefile.sim new file mode 100755 index 0000000000000000000000000000000000000000..4d1d80c780bc6f85911bd2acd1a85f31fe277743 --- /dev/null +++ b/software/lib/common/Makefile.sim @@ -0,0 +1,60 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. 
+## +## Release Information : SSE710-r0p0-00rel0 +## +##------------------------------------------------------------------------------ +## Purpose : C test Makefile +##------------------------------------------------------------------------------ +TESTCODES_BUILD_DIR = $(SOCLABS_MEGASOC_TECH_DIR)/software/build +BUILD_DIR = $(TESTCODES_BUILD_DIR)/$(TEST_ID) +SCRIPTS_DIR = $(SOCLABS_MEGASOC_TECH_DIR)/software/shared/tools/bin +MEM_IMG_PATH = $(SCRIPTS_DIR)/memory_image_scripts + +export BUILD_DIR +export MEM_IMG_PATH + +# ------------------------------------------------------------------------------ +# Default build target +# ------------------------------------------------------------------------------ +.PHONY: all +all: print get_host_image + ln -sf $(SOCLABS_MEGASOC_TECH_DIR)/software/shared/bin/cxdt_empty.bin $(SOCLABS_MEGASOC_TECH_DIR)/software/shared/bin/cxdt.bin + +boot_build: print get_boot_image + ln -sf $(SOCLABS_MEGASOC_TECH_DIR)/software/shared/bin/cxdt_empty.bin $(SOCLABS_MEGASOC_TECH_DIR)/software/shared/bin/cxdt.bin + +.PHONY: get_host_image +get_host_image: + @echo -e "\n------------------- Executing make target $@ -------------------" + make all -f $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.c_host + @rm -rf $(BUILD_DIR)/host0; mkdir -p $(BUILD_DIR)/host0; mv $(BUILD_DIR)/*.v8-a.* $(BUILD_DIR)/obj $(BUILD_DIR)/host0 + +.PHONY: get_boot_image +get_boot_image: + @echo -e "\n------------------- Executing make target $@ -------------------" + make all_bootloader -f $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.c_host + @rm -rf $(BUILD_DIR)/host0; mkdir -p $(BUILD_DIR)/host0; mv $(BUILD_DIR)/*.v8-a.* $(BUILD_DIR)/obj $(BUILD_DIR)/host0 + +.PHONY: print +print: + @echo -e "\n------------------- Executing make target $@ -------------------" + @echo "CURDIR = $(CURDIR)" + @echo "TEST_PATH = $(TEST_PATH)" + @echo "TEST_ID = $(TEST_ID)" + +.PHONY: clean +clean: + @echo -e "\n------------------- Executing make target $@ -------------------" + @echo "Cleaning all build files" + rm -rf $(BUILD_DIR) diff --git a/software/lib/sw_lib/apps/include/cpu_asm_codes.h b/software/lib/sw_lib/apps/include/cpu_asm_codes.h new file mode 100755 index 0000000000000000000000000000000000000000..bc3ba954eea55c8a0abe27f114c3130d70f1e557 --- /dev/null +++ b/software/lib/sw_lib/apps/include/cpu_asm_codes.h @@ -0,0 +1,56 @@ +/*------------------------------------------------------------------------------ + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. 
+ * + * Release Information : SSE710-r0p0-00rel0 + * + *------------------------------------------------------------------------------ + * Purpose : V8/V8.2 CPU ASM Functions Defines + * + * ----------------------------------------------------------------------------- +*/ + +#ifndef __CPUINFO_H__ +#define __CPUINFO_H__ + +#ifdef __cplusplus + extern "C" { +#endif + +#include <stdint.h> + +void enable_caches(void); +void enable_caches_el1(void); +void invalidate_caches(void); +void invalidateclean_cacheline(uint32_t *ptr); +uint32_t get_cpu_core_number(void); +void spin_lock_init(volatile unsigned short *mutex); +void spin_lock_s(volatile unsigned short *mutex); +void spin_unlock_s(volatile unsigned short *mutex); +void power_down_cpu_core(void); +void enable_smp(void); +void cpu_ret_control(uint32_t val); +void l2_retention(void); +void cpu_warm_reset_AArch64(uint32_t); +uint32_t get_cluster_id(void); +uint32_t get_cpu_thread_number(void); +void cluster_pwr_cntrl(uint32_t val); +void cluster_pwr_down(uint32_t val); +void switch_el1_ns(uint32_t stack_addr, uint32_t el1_exc_addr); +void tlb_invalidate_by_va(uint32_t *ptr); +uint32_t get_core_id(void); + +#ifdef __cplusplus +} +#endif + +#endif // __CPUINFO_H__ diff --git a/software/lib/sw_lib/apps/include/intrinsics.h b/software/lib/sw_lib/apps/include/intrinsics.h new file mode 100755 index 0000000000000000000000000000000000000000..b911e7bad6408664a36edc2dfdee0b75e0ef02e4 --- /dev/null +++ b/software/lib/sw_lib/apps/include/intrinsics.h @@ -0,0 +1,57 @@ +/*------------------------------------------------------------------------------ + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. 
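
The prototypes in cpu_asm_codes.h above give a bare-metal test enough to tell which core it is running on and to bring the caches up. A minimal per-core dispatch sketch follows; treating core 0 as the primary and simply parking the secondaries are assumptions of this sketch, not requirements of the library:

```c
#include "cpu_asm_codes.h"   /* enable_caches(), get_cpu_core_number(), ... */

/* Hypothetical test entry point, not part of the library. */
void example_test_entry(void)
{
    enable_caches();                      /* enable I and D caches (EL3 variant) */

    if (get_cpu_core_number() == 0) {
        /* primary core: run the single-threaded part of the test here */
    } else {
        for (;;) {                        /* park the secondary cores; a real test
                                             would usually wait on an event or
                                             mailbox instead                      */
        }
    }
}
```

The 32-bit/64-bit differences in how MPIDR is decoded are handled inside get_cpu_core_number() itself, so callers do not need to guard on `__V8_ARCH__`.
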
+ * + * Release Information : SSE710-r0p0-00rel0 + * + *------------------------------------------------------------------------------ + * Purpose : INTRINSICS for gcc and RCVT + * + * ----------------------------------------------------------------------------- +*/ + +#ifndef _INTRINSICS_H_ +#define _INTRINSICS_H_ + +/* GCC Specific Intrinsics */ +#ifdef __GCC_COMPILER__ + + +#define __nop() __asm__ __volatile__ ( " nop\n" ) + +#define __dsb(TYPE) __asm__ __volatile__ (" dsb %0" \ + : \ + : "I" (TYPE) ) + +#define __dmb(TYPE) __asm__ __volatile__ (" dmb %0" \ + : \ + : "I" (TYPE) ) + +#define __isb(TYPE) __asm__ __volatile__ (" isb %0" \ + : \ + : "I" (TYPE) ) + + +#define __sev() __asm__ __volatile__ ( " sev\n" ) +#define __sevl() __asm__ __volatile__ ( " sevl\n" ) + +#define __ASM __asm__ + +#else + +/* RVCT Specific Intrinsics */ +#define __ASM asm + +#endif + + +#endif diff --git a/software/lib/sw_lib/apps/include/ipc.h b/software/lib/sw_lib/apps/include/ipc.h new file mode 100755 index 0000000000000000000000000000000000000000..cb0f25b2d0d546e3f51f6171cecf2237499eaa96 --- /dev/null +++ b/software/lib/sw_lib/apps/include/ipc.h @@ -0,0 +1,65 @@ +/*------------------------------------------------------------------------------ + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. 
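
intrinsics.h mainly exists so the same source compiles under both GCC and the Arm toolchain: hint and barrier instructions become macros, and inline assembly is written through the `__ASM` alias. A small sketch of typical `__ASM` usage, assuming a compiler that accepts GNU-style inline-assembly constraints (true of both aarch64-none-elf-gcc and armclang as selected in the Makefiles); the helper itself is illustrative and not part of the library:

```c
#include <stdint.h>
#include "intrinsics.h"   /* provides __ASM and the __nop()/__sev() style macros */

/* Hypothetical helper: read the current exception level through the
 * compiler-neutral __ASM keyword. */
static inline uint64_t read_current_el(void)
{
    uint64_t el;
    __ASM volatile("mrs %0, CurrentEL" : "=r"(el));
    return el >> 2;       /* CurrentEL bits[3:2] hold the EL number */
}
```
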
+ * + * Release Information : SSE710-r0p0-00rel0 + * + *------------------------------------------------------------------------------ + * Purpose : Common Header File + * + * ----------------------------------------------------------------------------- +*/ + +#ifndef __IPC_H__ +#define __IPC_H__ + +#ifdef __cplusplus + extern "C" { +#endif + +/* Function prototype - get cpu id */ +extern unsigned get_cpuid(void); + +/* Function prototype - instruction synchronization barrier */ +extern void instr_sync_barrier(void); + +/* Function prototype - data synchronization barrier */ +extern void data_sync_barrier(void); + +/* Function prototype - checks for uniprocessor system */ +extern int is_uniprocessor(void); + +/* Function prototype - wait for event */ +extern void wait_for_event(void); + +/* Function prototype - wait for event */ +extern void call_wfe(void); + +/* Function prototype - send event */ +extern void send_event(void); + +/* Function prototype - call nop */ +extern void call_nop(void); + +/* ----------------------------------------------------------------------------- + * End of ipc.h + * ----------------------------------------------------------------------------- + */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* __IPC_H__ */ + diff --git a/software/lib/sw_lib/apps/include/irq.h b/software/lib/sw_lib/apps/include/irq.h new file mode 100755 index 0000000000000000000000000000000000000000..9382c466fba38a183987a064917190dfafade540 --- /dev/null +++ b/software/lib/sw_lib/apps/include/irq.h @@ -0,0 +1,49 @@ +/*------------------------------------------------------------------------------ + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. 
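
The routines declared in ipc.h wrap the WFE/SEV/DSB instructions, which is enough to build a simple cross-core handshake. A sketch of the usual publish-then-signal pattern; the flag name and functions below are illustrative, and the sketch assumes `mailbox_ready` lives in memory visible to both cores:

```c
#include "ipc.h"   /* data_sync_barrier(), send_event(), wait_for_event() */

static volatile unsigned int mailbox_ready;   /* hypothetical shared flag */

void producer_core(void)
{
    mailbox_ready = 1;        /* publish the flag...                          */
    data_sync_barrier();      /* ...and force the store out (DSB SY)          */
    send_event();             /* SEV wakes any core currently sleeping in WFE */
}

void consumer_core(void)
{
    while (!mailbox_ready)    /* WFE can return spuriously, so re-check       */
        wait_for_event();
    /* the shared data guarded by mailbox_ready can now be consumed */
}
```
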
+ * + * Release Information : SSE710-r0p0-00rel0 + * + *------------------------------------------------------------------------------ + * Purpose : IRQ Header File + * + * ----------------------------------------------------------------------------- +*/ + +#ifndef __IRQ_H__ +#define __IRQ_H__ + +#ifdef __cplusplus + extern "C" { +#endif + +/* Function prototype - wfi function */ +extern void call_wfi(void); + +/* Function prototype - enable the irq flag in cpu */ +extern void enable_irq(void); + +/* Function prototype - disable the irq flag in cpu */ +extern void disable_irq(void); + +/* ----------------------------------------------------------------------------- + * End of irq.h + * ----------------------------------------------------------------------------- + */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __IRQ_H__ */ + diff --git a/software/lib/sw_lib/apps/include/system.h b/software/lib/sw_lib/apps/include/system.h new file mode 100755 index 0000000000000000000000000000000000000000..44061ee1cdc664262ec9dc6b699b1be91aefe971 --- /dev/null +++ b/software/lib/sw_lib/apps/include/system.h @@ -0,0 +1,73 @@ +/*------------------------------------------------------------------------------ + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. + * + * Release Information : SSE710-r0p0-00rel0 + * + *------------------------------------------------------------------------------ + * Purpose : Application processor system peripheral base, irq and common functions defines + * + * ----------------------------------------------------------------------------- + */ + +#ifndef __SYSTEM_APP_H__ +#define __SYSTEM_APP_H__ + +#ifdef __cplusplus + extern "C" { +#endif + +#include <arm_acle.h> +#include "irq.h" +#include "ipc.h" +#include "cpu_asm_codes.h" + +/* Simple IO macro + * Given a interger type `base' pointer and `offset', the HW_REG returns de- + * referenced pointer. 
Note that since `base' is 4byte size 32bit system, the + * `offset' needs to be divided by 4 to give correct increment + */ +#define HW_REG(base,offset) *(((volatile unsigned int *)((unsigned long long)base)) + (offset >> 2)) +#define MEM_RW(base,offset) *(((volatile unsigned int *)((unsigned long long)base)) + (offset >> 2)) + +#define __WFI() call_wfi() +#define __wfi() call_wfi() + +#define __WFE() call_wfe() +#define __wfe() call_wfe() + +extern void abort_handler(void); + +void TEST_PASS(void); +void TEST_FAIL(void); + +int c_print_str(const char * fmt); +void c_print_char(const char ch); + +int c_print(const char * fmt, ...); + +int access_addr_wdata(unsigned long int base_address, unsigned int num_accesses, unsigned int write_data); + + +/* Fast Printf */ +#define printf c_print + +/* ----------------------------------------------------------------------------- + * End of system.h + * ----------------------------------------------------------------------------- + */ + +#ifdef __cplusplus +} +#endif + +#endif // __SYSTEM_APP_H__ diff --git a/software/lib/sw_lib/apps/src/boot.s b/software/lib/sw_lib/apps/src/boot.s new file mode 100755 index 0000000000000000000000000000000000000000..6ab03f8b57825151f87c6fca34d71905f2335909 --- /dev/null +++ b/software/lib/sw_lib/apps/src/boot.s @@ -0,0 +1,162 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
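
Because HW_REG()/MEM_RW() advance an `unsigned int` pointer by `offset >> 2`, the byte address actually accessed is simply `base + offset` for word-aligned offsets. A sketch of polled register access built on the macro; the base address, register offsets and status bit are placeholders, not values taken from this design's memory map:

```c
#include "system.h"   /* HW_REG()/MEM_RW() as defined above */

#define EXAMPLE_UART_BASE   0x1A200000UL   /* hypothetical peripheral base      */
#define EXAMPLE_UART_DATA   0x00           /* hypothetical data register offset */
#define EXAMPLE_UART_STATE  0x04           /* hypothetical status register      */

/* Hypothetical character-output routine using HW_REG(). */
static void example_uart_putc(char c)
{
    /* HW_REG(base, 0x04) accesses the 32-bit word at byte address base + 0x04 */
    while (HW_REG(EXAMPLE_UART_BASE, EXAMPLE_UART_STATE) & 0x1)
        ;                                  /* poll a hypothetical TX-busy bit */
    HW_REG(EXAMPLE_UART_BASE, EXAMPLE_UART_DATA) = (unsigned int)c;
}
```
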
+// +// Release Information : SSE710-r0p0-00rel0 +// +//------------------------------------------------------------------------------ +// Purpose : v8 cortex-A class processor 1st stage boot +// +// ----------------------------------------------------------------------------- + + + .section SECURE_ROM_BOOT, "ax" + .balign 8 + + + .global Image$$ARM_LIB_STACK$$ZI$$Limit + .global __main + .global monitor_vectors + // .global __stack_multi_cpu_init + + .weak monitor_vectors +// ------------------------------------------------------------------------------ +// Core initialisation from reset state +// ------------------------------------------------------------------------------ + .global app_bl1_entry + .type app_bl1_entry, @function + + + +app_bl1_entry: + + MOV x0,#0x0 + MOV x1,x0 + MOV x2,x0 + MOV x3,x0 + MOV x4,x0 + MOV x5,x0 + MOV x6,x0 + MOV x7,x0 + MOV x8,x0 + MOV x9,x0 + MOV x10,x0 + MOV x11,x0 + MOV x12,x0 + MOV x13,x0 + MOV x14,x0 + MOV x15,x0 + MOV x16,x0 + MOV x17,x0 + MOV x18,x0 + MOV x19,x0 + MOV x20,x0 + MOV x21,x0 + MOV x22,x0 + MOV x23,x0 + MOV x24,x0 + MOV x25,x0 + MOV x26,x0 + MOV x27,x0 + MOV x28,x0 + MOV x29,x0 + MOV x30,x0 + + MSR SP_EL0,x0 + MSR SP_EL1,x0 + MSR SP_EL2,x0 + MOV sp,x0 + MSR ELR_EL1,x0 + MSR ELR_EL2,x0 + MSR ELR_EL3,x0 + MSR SPSR_EL1,x0 + MSR SPSR_EL2,x0 + MSR SPSR_EL3,x0 + + +//=================================================================== +// Set Vector Base Address Register (VBAR) to point to this application's vector table +//=================================================================== + LDR x0, =monitor_vectors + MSR VBAR_EL3, x0 // EL3 sets vector base address + +//=================================================================== +// IRQ/FIQ/External Abort and SError Interrupt Routing taken in Monitor mode +//=================================================================== + MRS x0, scr_el3 + ORR x0, x0, #0xe + MSR scr_el3, x0 +// + MRS x1, CPACR_EL1 + ORR x1, x1, #0xf00000 //// co-pro access for VFP/Neon + MSR CPACR_EL1, x1 +// + MRS x1, CPTR_EL3 + AND x1, x1, # ~ ( 1 << 10 ) //// clear TFP bit + MSR CPTR_EL3, x1 + +//=================================================================== +// Clear the PSTATE.A fpr enabling SError Aborts (Posion Error) +//=================================================================== + + MSR DAIFCLR, #0x4 + ISB + + +//=================================================================== +// Enable NEON and initialize the register bank +//=================================================================== + MRS x0, ID_AA64PFR0_EL1 + SBFX x5, x0, #16, #4 // Extract the floating-point field + + MOV x1, #(0x3 << 20) + MSR cpacr_el1, x1 + MRS x1, cptr_el3 + + BIC x1, x1, #(0x1 << 10) // Ensure that CPTR_EL3.TFP is clear + MSR cptr_el3, x1 + ISB sy +#ifndef NOFP + FMOV d0, xzr + FMOV d1, xzr + FMOV d2, xzr + FMOV d3, xzr + FMOV d4, xzr + FMOV d5, xzr + FMOV d6, xzr + FMOV d7, xzr + FMOV d8, xzr + FMOV d9, xzr + FMOV d10, xzr + FMOV d11, xzr + FMOV d12, xzr + FMOV d13, xzr + FMOV d14, xzr + FMOV d15, xzr + FMOV d16, xzr + FMOV d17, xzr + FMOV d18, xzr + FMOV d19, xzr + FMOV d20, xzr + FMOV d21, xzr + FMOV d22, xzr + FMOV d23, xzr + FMOV d24, xzr + FMOV d25, xzr + FMOV d26, xzr + FMOV d27, xzr + FMOV d28, xzr + FMOV d29, xzr + FMOV d30, xzr + FMOV d31, xzr +#endif + B __main diff --git a/software/lib/sw_lib/apps/src/cpu_asm_codes.c b/software/lib/sw_lib/apps/src/cpu_asm_codes.c new file mode 100755 index 0000000000000000000000000000000000000000..626d49676f1133b0e436f38bd450e8441ac1ecb7 --- /dev/null +++ 
b/software/lib/sw_lib/apps/src/cpu_asm_codes.c @@ -0,0 +1,458 @@ +/*------------------------------------------------------------------------------ + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. + * + * Release Information : SSE710-r0p0-00rel0 + * + * ------------------------------------------------------------------------------*/ + +/** @file cpu_asm_codes.c + * @brief Helper functions to query/configure various features of the ARMv8/8.2 CORE + * + * These functions are used in various tests to enable MMU/CACHES, invalidate caches, + * power down core, switch core to EL2 and generate exclusive access using spin lock + * + * + */ + +#include <arm_acle.h> +#include "cpu_asm_codes.h" +#include "system.h" +#include "intrinsics.h" + +/** @brief synchronous external abort handler, default functionality is TEST_FAIL(), unless overriden by test owner <br> + * @return void + */ + +__attribute__((weak)) void sync_ext_abort_handler(void) +{ + TEST_FAIL(); + +} + +/** @brief Test Execution entry point for all slave CPU's, default functionality is execute __wfe, unless overriden by test owner <br> + * @return void + */ + +__attribute__((weak)) void cpu_test(void) +{ + c_print_str("&\n"); + while(1) { + __wfe(); + } +} + +/** @brief Identify Main Core ID Register, function reads MIDR register and returns Main Core ID <br> + * @return uint32_t + */ + +uint32_t get_core_id(void) +{ + uint32_t cpu_core_id; + __ASM volatile ( "MRS %x[output], midr_el1 \n\t" + "MOV x1, 0xffff \n\t" + "AND %x[output], %x[output], x1 \n\t" :[output] "=r" (cpu_core_id)::); + + return cpu_core_id; +} + +/** @brief Identify Core number, function reads MPIDR register and returns Core number <br> + * @return uint32_t + */ + +uint32_t get_cpu_core_number(void) +{ + uint32_t cpu_core_no; + +#ifndef __V8_ARCH__ + __ASM volatile ( "MRS %x[output], mpidr_el1 \n\t" + "MOV x1, 0xf00 \n\t" + "AND %x[output], %x[output], x1 \n\t" + "UBFX %w[output],%w[output],#8,#4\n\t" :[output] "=r" (cpu_core_no)::); +#else + __ASM volatile ( "MRS %x[output], mpidr_el1 \n\t" + "MOV x1, 0xf \n\t" + "AND %x[output], %x[output], x1 \n\t" :[output] "=r" (cpu_core_no)::); +#endif + + return cpu_core_no; +} + +/** @brief cpu thread number, function reads MPIDR register and returns thread number in multi_threaded CPU <br> + * @return uint32_t + */ + +uint32_t get_cpu_thread_number(void) +{ + uint32_t cpu_thread_no; + __ASM volatile ( "MRS %x[output], mpidr_el1 \n\t" + "MOV x1, 0xff \n\t" + "AND %x[output], %x[output], x1 \n\t" :[output] "=r" (cpu_thread_no)::); + return cpu_thread_no; +} + +/** @brief cluster id, function reads MPIDR register and returns cluster number <br> + * @return uint32_t + */ + +uint32_t get_cluster_id(void) +{ + uint32_t cluster_id; + +#ifndef __V8_ARCH__ + __ASM volatile ("mrs %x[output], mpidr_el1 \n\t" + "lsr %x[output], %x[output], #16 \n\t" + "and %x[output], %x[output], #0xFF\n\t" : [output] "=r" (cluster_id)::); +#else + __ASM volatile ("mrs %x[output], mpidr_el1 \n\t" + "lsr %x[output], %x[output], #8 \n\t" + 
"and %x[output], %x[output], #0xFF\n\t" : [output] "=r" (cluster_id)::); +#endif + + return cluster_id; +} + +/** @brief socket id, function reads MPIDR register and returns socket number <br> + * @return uint32_t + */ + +uint32_t get_socket_id(void) +{ + uint64_t socket_id; + + __ASM volatile ("mrs %x[output], mpidr_el1 \n\t" + "lsr %x[output], %x[output], #32 \n\t" + "and %x[output], %x[output], #0xFF\n\t" : [output] "=r" (socket_id)::); + return socket_id; +} + +// /** @brief enable_mmu, function to enable_mmu in el3 <br> +// * @return void +// */ + +// void enable_mmu(void) +// { +// __ASM volatile ( "ldr x0, =pgtbl1 \n\t" +// "msr TTBR0_EL3, x0 \n\t" +// "mov x1, #0x00FF \n\t" // Normal & Device memory attributes +// "msr mair_el3, x1 \n\t" +// "ldr x1, =0x00102F20 \n\t" +// "msr tcr_el3, x1 \n\t" +// "ic iallu \n\t" +// "tlbi alle3 \n\t" +// "mrs x0, sctlr_el3 \n\t" +// "mov x1, #0x0001 \n\t" // Turn on MMU +// "orr x0, x0, x1 \n\t" +// "msr SCTLR_EL3, x0 \n\t" +// "isb \n\t" :::"x0", "x1"); +// } + +// /** @brief enable_mmu in el1, function to enable_mmu in el1 <br> +// * @return void +// */ + +// void enable_mmu_el1(void) +// { +// __ASM volatile ( "ldr x0, =pgtbl1 \n\t" +// "msr TTBR0_EL1, x0 \n\t" +// "mov x1, #0x00FF \n\t" // Normal & Device memory attributes +// "msr mair_el1, x1 \n\t" +// "ldr x1, =0x00102F20 \n\t" +// "msr tcr_el1, x1 \n\t" +// "ic iallu \n\t" +// "mrs x0, sctlr_el1 \n\t" +// "mov x1, #0x0001 \n\t" // Turn on MMU +// "orr x0, x0, x1 \n\t" +// "msr SCTLR_EL1, x0 \n\t" +// "isb \n\t" :::"x0", "x1"); +// } + +/** @brief enable_caches, function to enable caches <br> + * @return void + */ + +void enable_caches(void) +{ + __ASM volatile ( "ic iallu \n\t" //; Invalidate I cache and BTAC + "dsb sy \n\t" + "isb \n\t" + "mrs x0, sctlr_el3 \n\t" + "mov x1, #0x1004 \n\t" //; Turn on caches + "orr x0, x0, x1 \n\t" + "msr SCTLR_EL3, x0 \n\t" + "isb \n\t" ::: "x0", "x1"); +} + +/** @brief enable_caches_el1, function to enable caches in el1 <br> + * @return void + */ + +void enable_caches_el1(void) +{ + __ASM volatile ( "ic iallu \n\t" + "dsb sy \n\t" + "isb \n\t" + "MRS x0, SCTLR_EL1 \n\t" + "MOV X1, #0x1004 \n\t" //; Turn on caches + "ORR x0, x0, x1 \n\t" + "MSR SCTLR_EL1, x0 \n\t" + "ISB \n\t" ::: "x0", "x1"); +} + +/** @brief instr_cache_line_invalidate, invalidate instruction cache line by address <br> + * @return void + */ + +void instr_cache_line_invalidate(uint32_t *ptr) +{ + __ASM volatile ( "ic ivau, %x[input] \n\t" + "dsb sy \n\t" ::[input] "r" (ptr):); +} + +/** @brief invalidateclean_cacheline, invalidate data cache line by address <br> + * @return void + */ + +void invalidateclean_cacheline(uint32_t *ptr) +{ + __ASM volatile ( "dc civac, %x[input] \n\t" //; IVAC = Invalidate, by Virtual Adress, to point of Coherence + "dsb sy \n\t" ::[input] "r" (ptr):); //; Dont want no funky order +} + +/** @brief tlb_invalidate_by_va, invalidate TLB by VA <br> + * @return void + */ + +void tlb_invalidate_by_va(uint32_t *ptr) +{ + __ASM volatile ( "tlbi vaae1is, %x[input] \n\t" + "dsb sy \n\t" + "isb \n\t" ::[input] "r" (ptr):); +} + +/** @brief invalidate_caches, invalidate Level-1 Data cache <br> + * @return void + */ + +void invalidate_caches(void) +{ + __ASM volatile ( " mrs x0, clidr_el1 \n\t" //; read clidr + " ands w3, w0, #0x7000000 \n\t" //; extract loc from clidr + " lsr w3, w3, #23 \n\t" //; left align loc bit field + " b.eq finished \n\t" //; if loc is 0, then no need to clean + " mov w10, #0 \n\t" //; start clean at cache level 0 (in x10) + "loop1: \n\t" + " add 
w2, w10, w10, lsr #1 \n\t" //; work out 3x current cache level + " lsr w12, w0, w2 \n\t" //; extract cache type bits from clidr + " and w12, w12, #7 \n\t" //; mask of the bits for current cache only + " cmp w12, #2 \n\t" //; see what cache we have at this level + " b.lt skip \n\t" //; skip if no cache, or just i-cache + " msr csselr_el1, x10 \n\t" //; select current cache level in cssr + " isb \n\t" //; sync the new context + " mrs x12, ccsidr_el1 \n\t" // ; read the new csidr + " and w2, w12, #7 \n\t" //; extract the length of the cache lines + " add w2, w2, #4 \n\t" //; add 4 (line length offset) + " mov w6, #0x3ff \n\t" // + " ands w6, w6, w12, lsr #3 \n\t" //; find maximum number on the way size + " clz w5, w6 \n\t" //; find bit position of way size increment + " mov w7, #0x7fff \n\t" + " ands w7, w7, w12, lsr #13 \n\t" //; extract max number of the index size + "loop2: \n\t" + " mov w8, w6 \n\t" //; create working copy of max way size + "loop3: \n\t" + " lsl w9, w8, w5 \n\t" + " lsl w13, w7, w2 \n\t" + " orr w11, w10, w9 \n\t" //; factor way and cache number into x11 + " orr w11, w11, w13 \n\t" //; factor index number into x11 + " dc cisw, x11 \n\t" // ; invalidate by set/way + " subs w8, w8, #1 \n\t" //; decrement the way + " b.ge loop3 \n\t" + " subs w7, w7, #1 \n\t" //; decrement the index + " b.ge loop2 \n\t" + "skip: \n\t" + " //add w10, w10, #2 \n\t" //; increment cache number + " //cmp w3, x10 \n\t" // + " //b.gt loop1 \n\t" // + "finished: \n\t" // + " mov x0, #0 \n\t" // + " msr csselr_el1, x0 \n\t" // + " dsb sy \n\t" // + " isb \n\t" :::"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12"); +} + +/** @brief spin_lock_init, initialization of spinlock routine <br> + * @return void + */ + +void spin_lock_init(volatile unsigned short *mutex) +{ + *mutex = 0; +} + +/** @brief spin_lock_s, locks the spinlock <br> + * @return void + */ + +void spin_lock_s(volatile unsigned short *mutex) +{ + __ASM volatile ( " mov w2, #0x1 \n" + "spin: \n\t" + " ldxrh w1, [%x[input]] \n\t" //; see what state the lock is in + " cmp w1, #0x1 \n\t" //; locked already? + " b.eq 1f \n\t" + " wfe \n\t" //; wait if it's locked + " b.ne spin \n" + "1: \n\t" + " stxr w1, w2, [%x[input]] \n\t" //; try to lock it, if it's unlocked + " cbnz w1, spin \n\t" //; did we fail? 
//; if any failure, loop + " dmb #0x3 \n\t"::[input] "r" (mutex): "w1", "w2"); //; make sure subsequent accesses appear after the lock +} + +/** @brief spin_unlock_s, unlocks the spinlock <br> + * @return void + */ + +void spin_unlock_s(volatile unsigned short *mutex) +{ + __ASM volatile ("dmb #0x3 \n\t" + "mov w1, #0x1 \n\t" + "strh w1, [%x[input]] \n\t" //; release spin lock + "dsb #0x3 \n\t" //; ensure completion of the store to clear the lock + "sev \n\t" ::[input] "r" (mutex): "w1"); //; unlock any spinlock +} + +/** @brief enable_smp, enables SMP bit, only applicable for v8 Cores <br> + * @return void + */ + +void enable_smp(void) +{ +#ifdef __V8_ARCH__ + __ASM volatile ( "mov x1, #0x40 \n\t" + "mrs x0, S3_1_c15_c2_1 \n\t" + "orr x0, x0, x1 \n\t" + "msr S3_1_c15_c2_1, x0 \n\t" :::"x0", "x1"); +#endif +} + +/** @brief power_down_cpu_core, Power downs individual cores, Only applicable for v8 Cores <br> + * @return void + */ + +void power_down_cpu_core(void) +{ + + __ASM volatile ("isb \n\t" + "dsb sy \n\t" + "mov x1, #0x40 \n\t" /* Switch the processor from Symmetric Multiprocessing (SMP) mode to Asymmetric Multiprocessing (AMP) */ + "mrs x0, s3_1_c15_c2_1 \n\t" + "bic x0, x0, x1 \n\t" + "msr s3_1_c15_c2_1, x0 \n\t" + "mov x1, #0x40 \n\t" /* Set the DBGOSDLR.DLK bit */ + "mrs x0, osdlr_el1 \n\t" + "orr x0, x0, #0x1 \n\t" + "msr osdlr_el1, x0 \n\t" + "isb \n\t" + "dsb sy \n\t" + "wfi \n\t" :::"x0", "x1", "x2"); +} + +/** @brief cpu_ret_control, configures cpu in retention <br> + * @return void + */ + +void cpu_ret_control(uint32_t val) +{ +#ifndef __V8_ARCH__ + __ASM volatile ("msr s3_0_c15_c2_7, %x[input] \n\t" ::[input] "r" (val)); +#else + /* CPU Retention control with 512 Architecture Timer ticks */ + __ASM volatile ("mov x0, #0x7 \n\t" + "mrs x1, s3_1_c15_c2_1 \n\t" + "orr x1, x1, x0 \n\t" + "msr s3_1_c15_c2_1, x1 \n\t" :::"x0", "x1"); +#endif +} + +/** @brief l2_retention, enables l2 retention, only applicable for v8 Processor <br> + * @return void + */ + +void l2_retention(void) +{ +#ifdef __V8_ARCH__ + __ASM volatile ("mrs x0, s3_1_c11_c0_3 \n\t" + "orr x0, x0, #0x7 \n\t" /* 512 Generic Timer ticks required before retention entry */ + "orr x0, x0, #0x7 \n\t" + "msr s3_1_c11_c0_3, x0 \n\t" :: :"x0"); +#endif +} + +/** @brief cpu_warm_reset_AArch64, generates warm reset <br> + * @return void + */ + +void cpu_warm_reset_AArch64(uint32_t val) +{ + /* Generate WARM reset */ + __ASM volatile ("msr rmr_el3, %x[input] \n\t" + "isb \n\t" + "wfi \n\t" ::[input] "r" (val):); +} + +/** @brief cluster_pwr_cntrl, Cluster Power Control Register for L3 data RAM retention <br> + * @return void + */ + +void cluster_pwr_cntrl(uint32_t val) +{ + /* Generate WARM reset */ + __ASM volatile ("msr s3_0_c15_c3_5, %x[input] \n\t" + "isb \n\t" ::[input] "r" (val):); +} + + +/** @brief cluster_pwr_down, cluster power down Register <br> + * @return void + */ + +void cluster_pwr_down(uint32_t val) +{ + __ASM volatile ("msr s3_0_c15_c3_6, %x[input] \n\t" + "isb \n\t" ::[input] "r" (val):); +} + +/** @brief switch_el1_ns, witch EL1 in non-secure mode <br> + * @return void + */ + +void switch_el1_ns(uint32_t stack_addr, uint32_t el1_exc_addr) +{ + __ASM volatile ( + " MRS x1, scr_el3 \n\t" + " MOV x2, #0xC01 \n\t" + " ORR x1, x1, x2 \n\t" + " MSR scr_el3, x1 \n\t" + " MSR SP_EL1, %x[input0] \n\t" + " MSR ELR_EL3, %x[input1] \n\t" + " mov x0, #0x5 \n\t" + " MSR SPSR_EL3, x0 \n\t" + " MOV x0, #0x0 \n\t" + " ORR x0, x0, #(1 << 31) \n\t" + " MSR HCR_EL2, x0 \n\t" + " ISB \n\t" + " ERET \n\t" ::[input0] "r" 
(stack_addr), [input1] "r" (el1_exc_addr) : "x0", "x1", "x3"); + +} + + + diff --git a/software/lib/sw_lib/apps/src/ipc.s b/software/lib/sw_lib/apps/src/ipc.s new file mode 100755 index 0000000000000000000000000000000000000000..7371152242df3e0c695dc98e766e79d915e4feff --- /dev/null +++ b/software/lib/sw_lib/apps/src/ipc.s @@ -0,0 +1,61 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +//------------------------------------------------------------------------------ +// Purpose : Lowlevel routines for Inter-Processor communication +// ----------------------------------------------------------------------------- + .section helpers_mp + .text + .align 8 + + + .global wait_for_event + .global call_wfe + .global call_nop + .global send_event + + .global instr_sync_barrier + .global data_sync_barrier + + .type wait_for_event, @function +wait_for_event : + WFE + RET + + .type call_wfe, @function +call_wfe : + WFE + RET + + .type call_nop, @function +call_nop : + NOP + RET + + .type send_event, @function +send_event : + SEV + RET + + + .type instr_sync_barrier, @function +instr_sync_barrier : + ISB SY + RET + + .type data_sync_barrier, @function +data_sync_barrier : + DSB SY + RET diff --git a/software/lib/sw_lib/apps/src/irq.s b/software/lib/sw_lib/apps/src/irq.s new file mode 100755 index 0000000000000000000000000000000000000000..cb5f2d33b37fe06a963514e2646f4af74dcef98f --- /dev/null +++ b/software/lib/sw_lib/apps/src/irq.s @@ -0,0 +1,53 @@ +//; ------------------------------------------------------------------------------ +//; The confidential and proprietary information contained in this file may +//; only be used by a person authorised under and to the extent permitted +//; by a subsisting licensing agreement from Arm Limited or its affiliates. +//; +//; (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +//; ALL RIGHTS RESERVED +//; +//; This entire notice must be reproduced on all copies of this file +//; and copies of this file may only be made by a person if such person is +//; permitted to do so under the terms of a subsisting license agreement +//; from Arm Limited or its affiliates. 
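
Following the doc comments in cpu_asm_codes.c above, the spin-lock API is used as init-once / lock / unlock around a shared resource. A sketch of that call pattern; the shared counter, its placement in core-visible memory, and having the primary core perform the initialisation are assumptions of the sketch rather than requirements stated in the source:

```c
#include "cpu_asm_codes.h"   /* spin_lock_init(), spin_lock_s(), spin_unlock_s() */

static volatile unsigned short test_mutex;       /* hypothetical lock word        */
static volatile unsigned int   cores_checked_in; /* hypothetical shared counter   */

void primary_core_setup(void)
{
    spin_lock_init(&test_mutex);           /* initialise before any core locks    */
}

void any_core_body(void)
{
    spin_lock_s(&test_mutex);              /* exclusive-access lock; waits in WFE */
    cores_checked_in++;                    /* critical section                    */
    spin_unlock_s(&test_mutex);            /* release; SEV wakes waiting cores    */
}
```
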
+//; +//; Release Information : SSE710-r0p0-00rel0 +//; +//; ------------------------------------------------------------------------------ +//; Purpose : Low level helper functions for Exception handling +//; +//; ----------------------------------------------------------------------------- + +// ; Lowlevel routines systembench environment + .section irq_helper + .text + .align 8 + + .global call_wfi + .global enable_irq + .global enable_fiq + .global disable_irq + + .type call_wfi, @function +call_wfi: + WFI + RET + + + .type enable_irq, @function +enable_irq: + MSR DAIFCLR, #0x2 + RET + + .type enable_fiq, @function +enable_fiq: + MSR DAIFCLR, #0x1 + RET + + + .type disable_irq, @function +disable_irq: + MSR DAIFSET, #0x2 + RET + + diff --git a/software/lib/sw_lib/apps/src/page_table.s b/software/lib/sw_lib/apps/src/page_table.s new file mode 100755 index 0000000000000000000000000000000000000000..6d43fbf1bdc45859f6fa9a0ffd47be6e4c062685 --- /dev/null +++ b/software/lib/sw_lib/apps/src/page_table.s @@ -0,0 +1,1091 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +//------------------------------------------------------------------------------- +// Description: Generates standard level one MMU descriptors for cache tests. +// This file uses the 1MB section format. +// So each entry in this file will map to a 1MB Area. +// Description of important fields: +// +// 31:20 = Section, Top bits of the 1MB area to map. +// 14:12 = TEX +// 3 = Cacheable +// 2 = Bufferable +// +// Examples: +// +// 1c110c02 = Address 1C100000 - 1C200000, Strongly Ordered. Data will not be cached. +// 9ad01c0e = Address 9AD00000 - 9AE00000, Normal, data will be cached. 
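
The pgtbl1 entries defined below are 64-bit VMSAv8-64 level-1 block descriptors, each covering 1GB, so a value such as 0x0060000100000409 packs the output address, MAIR index, access flag and execute-never bits into one word. A purely illustrative decode sketch showing where those fields sit:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative decoder for a level-1 (1GB) block descriptor such as the
 * pgtbl1 entries below; field positions follow the Armv8-A VMSAv8-64
 * long-descriptor format. */
static void decode_l1_block(uint64_t d)
{
    unsigned valid     = (unsigned)(d & 0x1);          /* bit[0]   entry valid              */
    unsigned is_table  = (unsigned)((d >> 1) & 0x1);   /* bit[1]   1 = table, 0 = block     */
    unsigned attr_indx = (unsigned)((d >> 2) & 0x7);   /* bits[4:2] index into MAIR_ELx     */
    unsigned af        = (unsigned)((d >> 10) & 0x1);  /* bit[10]  access flag              */
    unsigned pxn       = (unsigned)((d >> 53) & 0x1);  /* bit[53]  privileged execute-never */
    unsigned xn        = (unsigned)((d >> 54) & 0x1);  /* bit[54]  execute-never            */
    uint64_t out_addr  = d & 0x0000FFFFC0000000ULL;    /* bits[47:30] 1GB-aligned output    */

    printf("%016llx -> OA=0x%010llx AttrIndx=%u AF=%u PXN=%u XN=%u %s\n",
           (unsigned long long)d, (unsigned long long)out_addr,
           attr_indx, af, pxn, xn,
           (valid && !is_table) ? "block" : "other");
}

int main(void)
{
    decode_l1_block(0x0000000040000401ULL);  /* 1-2GB: Normal memory, AttrIndx 0     */
    decode_l1_block(0x0060000100000409ULL);  /* 4-5GB: Device-nGnRnE, XN, AttrIndx 2 */
    return 0;
}
```
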
+// +//------------------------------------------------------------------------------- + +/* + SSE-710 Memory Map: + 0x00_0000_0000 16MB Boot register + reserved + 0x00_0100_0000 16MB Reserved + 0x00_0200_0000 32MB Volatile Memory + 0x00_0400_0000 64MB Reserved + 0x00_0800_0000 128MB Non-Volatile Memory + 0x00_1000_0000 160MB Debug + 0x00_1A00_0000 608MB Host Peripherals + 0x00_4000_0000 1GB Host Master Expansion + 0x00_8000_0000 2GB Off-chip Volatile Memory + 0x01_0000_0000 1020GB Reserved +*/ + +// level 1 table +// 512 entry +// 1 entry covers 1 GB + .section PAGE_TABLE_1 + .text + .align 12 + .global pgtbl1 + +pgtbl1 : + .quad (0x0000000000000003 + pgtbl2) //0- 1GB (next table address) + .quad (0x0000000040000401) //1- 2GB (output address) Host Master Expansion - Normal, Inner/Outer WB/WA/RA + .quad (0x0000000080000401) //2- 3GB (output address) Off-chip Volatile Memory - Normal, Inner/Outer WB/WA/RA + .quad (0x00000000c0000401) //3- 4GB (output address) Off-chip Volatile Memory - Normal, Inner/Outer WB/WA/RA + .quad (0x0060000100000409) //4- 5GB (output address) Reserved - MAIR: Device-nGnRnE - XN - AF + .quad (0x0060000140000409) //5- 6GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000180000409) //6- 7GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600001c0000409) //7- 8GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000200000409) //8- 9GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000240000409) //9- 10GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000280000409) //11- 12GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600002c0000409) //12- 13GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000300000409) //13- 14GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000340000409) //14- 15GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000380000409) //15- 16GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600003c0000409) //16- 17GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000400000409) //17- 18GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000440000409) //18- 19GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000480000409) //19- 20GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600004c0000409) //20- 21GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000500000409) //21- 22GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000540000409) //22- 23GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000580000409) //23- 24GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600005c0000409) //24- 25GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000600000409) //25- 26GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000640000409) //26- 27GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000680000409) //27- 28GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600006c0000409) //28- 29GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000700000409) //29- 30GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000740000409) //30- 31GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000780000409) //31- 32GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600007c0000409) //32- 33GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000800000409) //33- 34GB (output address) 
Reserved - MAIR: Device-nGnRnE + .quad (0x0060000840000409) //34- 35GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000880000409) //35- 36GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600008c0000409) //36- 37GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000900000409) //37- 38GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000940000409) //38- 39GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000980000409) //39- 40GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600009c0000409) //40- 41GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000a00000409) //41- 42GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000a40000409) //42- 43GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000a80000409) //43- 44GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000ac0000409) //44- 45GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000b00000409) //45- 46GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000b40000409) //46- 47GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000b80000409) //47- 48GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000bc0000409) //48- 49GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000c00000409) //49- 50GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000c40000409) //50- 51GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000c80000409) //51- 52GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000cc0000409) //52- 53GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000d00000409) //53- 54GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000d40000409) //54- 55GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000d80000409) //55- 56GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000dc0000409) //56- 57GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000e00000409) //57- 58GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000e40000409) //58- 59GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000e80000409) //59- 60GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000ec0000409) //60- 61GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000f00000409) //61- 62GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000f40000409) //62- 63GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000f80000409) //63- 64GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060000fc0000409) //64- 65GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001000000409) //65- 66GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001040000409) //66- 67GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001080000409) //67- 68GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600010c0000409) //68- 69GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001100000409) //69- 70GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001140000409) //70- 71GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001180000409) //71- 72GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600011c0000409) //72- 73GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001200000409) //73- 74GB (output address) Reserved - MAIR: Device-nGnRnE + 
.quad (0x0060001240000409) //74- 75GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001280000409) //75- 76GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600012c0000409) //76- 77GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001300000409) //77- 78GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001340000409) //78- 79GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001380000409) //79- 80GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600013c0000409) //80- 81GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001400000409) //81- 82GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001440000409) //82- 83GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001480000409) //83- 84GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600014c0000409) //84- 85GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001500000409) //85- 86GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001540000409) //86- 87GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001580000409) //87- 88GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600015c0000409) //88- 89GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001600000409) //89- 90GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001640000409) //90- 91GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001680000409) //91- 92GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600016c0000409) //92- 93GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001700000409) //93- 94GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001740000409) //94- 95GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001780000409) //95- 96GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600017c0000409) //96- 97GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001800000409) //97- 98GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001840000409) //98- 99GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001880000409) //99- 100GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600018c0000409) //100- 101GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001900000409) //101- 102GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001940000409) //102- 103GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001980000409) //103- 104GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600019c0000409) //104- 105GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001a00000409) //105- 106GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001a40000409) //106- 107GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001a80000409) //107- 108GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001ac0000409) //108- 109GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001b00000409) //109- 110GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001b40000409) //110- 111GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001b80000409) //111- 112GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001bc0000409) //112- 113GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001c00000409) //113- 114GB (output address) Reserved - MAIR: Device-nGnRnE + .quad 
(0x0060001c40000409) //114- 115GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001c80000409) //115- 116GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001cc0000409) //116- 117GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001d00000409) //117- 118GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001d40000409) //118- 119GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001d80000409) //119- 120GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001dc0000409) //120- 121GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001e00000409) //121- 122GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001e40000409) //122- 123GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001e80000409) //123- 124GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001ec0000409) //124- 125GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001f00000409) //125- 126GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001f40000409) //126- 127GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001f80000409) //127- 128GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060001fc0000409) //128- 129GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002000000409) //129- 130GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002040000409) //130- 131GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002080000409) //131- 132GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600020c0000409) //132- 133GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002100000409) //133- 134GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002140000409) //134- 135GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002180000409) //135- 136GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600021c0000409) //136- 137GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002200000409) //137- 138GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002240000409) //138- 139GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002280000409) //139- 140GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600022c0000409) //140- 141GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002300000409) //141- 142GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002340000409) //142- 143GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002380000409) //143- 144GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600023c0000409) //144- 145GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002400000409) //145- 146GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002440000409) //146- 147GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002480000409) //147- 148GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600024c0000409) //148- 149GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002500000409) //149- 150GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002540000409) //150- 151GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002580000409) //151- 152GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600025c0000409) //152- 153GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002600000409) //153- 154GB (output 
address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002640000409) //154- 155GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002680000409) //155- 156GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600026c0000409) //156- 157GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002700000409) //157- 158GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002740000409) //158- 159GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002780000409) //159- 160GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600027c0000409) //160- 161GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002800000409) //161- 162GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002840000409) //162- 163GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002880000409) //163- 164GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600028c0000409) //164- 165GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002900000409) //165- 166GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002940000409) //166- 167GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002980000409) //167- 168GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600029c0000409) //168- 169GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002a00000409) //169- 170GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002a40000409) //170- 171GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002a80000409) //171- 172GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002ac0000409) //172- 173GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002b00000409) //173- 174GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002b40000409) //174- 175GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002b80000409) //175- 176GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002bc0000409) //176- 177GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002c00000409) //177- 178GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002c40000409) //178- 179GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002c80000409) //179- 180GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002cc0000409) //180- 181GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002d00000409) //181- 182GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002d40000409) //182- 183GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002d80000409) //183- 184GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002dc0000409) //184- 185GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002e00000409) //185- 186GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002e40000409) //186- 187GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002e80000409) //187- 188GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002ec0000409) //188- 189GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002f00000409) //189- 190GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002f40000409) //190- 191GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002f80000409) //191- 192GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060002fc0000409) //192- 193GB (output address) Reserved - MAIR: Device-nGnRnE + 
.quad (0x0060003000000409) //193- 194GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003040000409) //194- 195GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003080000409) //195- 196GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600030c0000409) //196- 197GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003100000409) //197- 198GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003140000409) //198- 199GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003180000409) //199- 200GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600031c0000409) //200- 201GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003200000409) //201- 202GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003240000409) //202- 203GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003280000409) //203- 204GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600032c0000409) //204- 205GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003300000409) //205- 206GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003340000409) //206- 207GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003380000409) //207- 208GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600033c0000409) //208- 209GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003400000409) //209- 210GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003440000409) //210- 211GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003480000409) //211- 212GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600034c0000409) //212- 213GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003500000409) //213- 214GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003540000409) //214- 215GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003580000409) //215- 216GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600035c0000409) //216- 217GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003600000409) //217- 218GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003640000409) //218- 219GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003680000409) //219- 220GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600036c0000409) //220- 221GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003700000409) //221- 222GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003740000409) //222- 223GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003780000409) //223- 224GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600037c0000409) //224- 225GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003800000409) //225- 226GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003840000409) //226- 227GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003880000409) //227- 228GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600038c0000409) //228- 229GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003900000409) //229- 230GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003940000409) //230- 231GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003980000409) //231- 232GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600039c0000409) //232- 233GB 
(output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003a00000409) //233- 234GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003a40000409) //234- 235GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003a80000409) //235- 236GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003ac0000409) //236- 237GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003b00000409) //237- 238GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003b40000409) //238- 239GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003b80000409) //239- 240GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003bc0000409) //240- 241GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003c00000409) //241- 242GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003c40000409) //242- 243GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003c80000409) //243- 244GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003cc0000409) //244- 245GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003d00000409) //245- 246GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003d40000409) //246- 247GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003d80000409) //247- 248GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003dc0000409) //248- 249GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003e00000409) //249- 250GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003e40000409) //250- 251GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003e80000409) //251- 252GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003ec0000409) //252- 253GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003f00000409) //253- 254GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003f40000409) //254- 255GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003f80000409) //255- 256GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060003fc0000409) //256- 257GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004000000409) //257- 258GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004040000409) //258- 259GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004080000409) //259- 260GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600040c0000409) //260- 261GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004100000409) //261- 262GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004140000409) //262- 263GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004180000409) //263- 264GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600041c0000409) //264- 265GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004200000409) //265- 266GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004240000409) //266- 267GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004280000409) //267- 268GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600042c0000409) //268- 269GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004300000409) //269- 270GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004340000409) //270- 271GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004380000409) //271- 272GB (output address) Reserved - MAIR: 
Device-nGnRnE + .quad (0x00600043c0000409) //272- 273GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004400000409) //273- 274GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004440000409) //274- 275GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004480000409) //275- 276GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600044c0000409) //276- 277GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004500000409) //277- 278GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004540000409) //278- 279GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004580000409) //279- 280GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600045c0000409) //280- 281GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004600000409) //281- 282GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004640000409) //282- 283GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004680000409) //283- 284GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600046c0000409) //284- 285GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004700000409) //285- 286GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004740000409) //286- 287GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004780000409) //287- 288GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600047c0000409) //288- 289GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004800000409) //289- 290GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004840000409) //290- 291GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004880000409) //291- 292GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600048c0000409) //292- 293GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004900000409) //293- 294GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004940000409) //294- 295GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004980000409) //295- 296GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600049c0000409) //296- 297GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004a00000409) //297- 298GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004a40000409) //298- 299GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004a80000409) //299- 300GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004ac0000409) //300- 301GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004b00000409) //301- 302GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004b40000409) //302- 303GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004b80000409) //303- 304GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004bc0000409) //304- 305GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004c00000409) //305- 306GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004c40000409) //306- 307GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004c80000409) //307- 308GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004cc0000409) //308- 309GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004d00000409) //309- 310GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004d40000409) //310- 311GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004d80000409) 
//311- 312GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004dc0000409) //312- 313GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004e00000409) //313- 314GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004e40000409) //314- 315GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004e80000409) //315- 316GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004ec0000409) //316- 317GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004f00000409) //317- 318GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004f40000409) //318- 319GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004f80000409) //319- 320GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060004fc0000409) //320- 321GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005000000409) //321- 322GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005040000409) //322- 323GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005080000409) //323- 324GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600050c0000409) //324- 325GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005100000409) //325- 326GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005140000409) //326- 327GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005180000409) //327- 328GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600051c0000409) //328- 329GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005200000409) //329- 330GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005240000409) //330- 331GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005280000409) //331- 332GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600052c0000409) //332- 333GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005300000409) //333- 334GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005340000409) //334- 335GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005380000409) //335- 336GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600053c0000409) //336- 337GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005400000409) //337- 338GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005440000409) //338- 339GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005480000409) //339- 340GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600054c0000409) //340- 341GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005500000409) //341- 342GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005540000409) //342- 343GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005580000409) //343- 344GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600055c0000409) //344- 345GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005600000409) //345- 346GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005640000409) //346- 347GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005680000409) //347- 348GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600056c0000409) //348- 349GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005700000409) //349- 350GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005740000409) //350- 351GB (output address) Reserved - 
MAIR: Device-nGnRnE + .quad (0x0060005780000409) //351- 352GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600057c0000409) //352- 353GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005800000409) //353- 354GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005840000409) //354- 355GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005880000409) //355- 356GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600058c0000409) //356- 357GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005900000409) //357- 358GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005940000409) //358- 359GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005980000409) //359- 360GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600059c0000409) //360- 361GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005a00000409) //361- 362GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005a40000409) //362- 363GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005a80000409) //363- 364GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005ac0000409) //364- 365GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005b00000409) //365- 366GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005b40000409) //366- 367GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005b80000409) //367- 368GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005bc0000409) //368- 369GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005c00000409) //369- 370GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005c40000409) //370- 371GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005c80000409) //371- 372GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005cc0000409) //372- 373GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005d00000409) //373- 374GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005d40000409) //374- 375GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005d80000409) //375- 376GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005dc0000409) //376- 377GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005e00000409) //377- 378GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005e40000409) //378- 379GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005e80000409) //379- 380GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005ec0000409) //380- 381GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005f00000409) //381- 382GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005f40000409) //382- 383GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005f80000409) //383- 384GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060005fc0000409) //384- 385GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006000000409) //385- 386GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006040000409) //386- 387GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006080000409) //387- 388GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600060c0000409) //388- 389GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006100000409) //389- 390GB (output address) Reserved - MAIR: Device-nGnRnE + .quad 
(0x0060006140000409) //390- 391GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006180000409) //391- 392GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600061c0000409) //392- 393GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006200000409) //393- 394GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006240000409) //394- 395GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006280000409) //395- 396GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600062c0000409) //396- 397GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006300000409) //397- 398GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006340000409) //398- 399GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006380000409) //399- 400GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600063c0000409) //400- 401GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006400000409) //401- 402GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006440000409) //402- 403GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006480000409) //403- 404GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600064c0000409) //404- 405GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006500000409) //405- 406GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006540000409) //406- 407GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006580000409) //407- 408GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600065c0000409) //408- 409GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006600000409) //409- 410GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006640000409) //410- 411GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006680000409) //411- 412GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600066c0000409) //412- 413GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006700000409) //413- 414GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006740000409) //414- 415GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006780000409) //415- 416GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600067c0000409) //416- 417GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006800000409) //417- 418GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006840000409) //418- 419GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006880000409) //419- 420GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600068c0000409) //420- 421GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006900000409) //421- 422GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006940000409) //422- 423GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006980000409) //423- 424GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600069c0000409) //424- 425GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006a00000409) //425- 426GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006a40000409) //426- 427GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006a80000409) //427- 428GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006ac0000409) //428- 429GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006b00000409) //429- 430GB (output 
address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006b40000409) //430- 431GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006b80000409) //431- 432GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006bc0000409) //432- 433GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006c00000409) //433- 434GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006c40000409) //434- 435GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006c80000409) //435- 436GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006cc0000409) //436- 437GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006d00000409) //437- 438GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006d40000409) //438- 439GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006d80000409) //439- 440GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006dc0000409) //440- 441GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006e00000409) //441- 442GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006e40000409) //442- 443GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006e80000409) //443- 444GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006ec0000409) //444- 445GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006f00000409) //445- 446GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006f40000409) //446- 447GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006f80000409) //447- 448GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060006fc0000409) //448- 449GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007000000409) //449- 450GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007040000409) //450- 451GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007080000409) //451- 452GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600070c0000409) //452- 453GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007100000409) //453- 454GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007140000409) //454- 455GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007180000409) //455- 456GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600071c0000409) //456- 457GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007200000409) //457- 458GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007240000409) //458- 459GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007280000409) //459- 460GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600072c0000409) //460- 461GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007300000409) //461- 462GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007340000409) //462- 463GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007380000409) //463- 464GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600073c0000409) //464- 465GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007400000409) //465- 466GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007440000409) //466- 467GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007480000409) //467- 468GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600074c0000409) //468- 469GB (output address) Reserved - MAIR: Device-nGnRnE + 
.quad (0x0060007500000409) //469- 470GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007540000409) //470- 471GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007580000409) //471- 472GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600075c0000409) //472- 473GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007600000409) //473- 474GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007640000409) //474- 475GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007680000409) //475- 476GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600076c0000409) //476- 477GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007700000409) //477- 478GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007740000409) //478- 479GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007780000409) //479- 480GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600077c0000409) //480- 481GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007800000409) //481- 482GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007840000409) //482- 483GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007880000409) //483- 484GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600078c0000409) //484- 485GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007900000409) //485- 486GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007940000409) //486- 487GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007980000409) //487- 488GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x00600079c0000409) //488- 489GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007a00000409) //489- 490GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007a40000409) //490- 491GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007a80000409) //491- 492GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007ac0000409) //492- 493GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007b00000409) //493- 494GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007b40000409) //494- 495GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007b80000409) //495- 496GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007bc0000409) //496- 497GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007c00000409) //497- 498GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007c40000409) //498- 499GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007c80000409) //499- 500GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007cc0000409) //500- 501GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007d00000409) //501- 502GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007d40000409) //502- 503GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007d80000409) //502- 503GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007dc0000409) //503- 504GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007e00000409) //504- 505GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007e40000409) //505- 506GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007e80000409) //506- 507GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007ec0000409) //507- 508GB 
(output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007f00000409) //508- 509GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007f40000409) //509- 510GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007f80000409) //510- 511GB (output address) Reserved - MAIR: Device-nGnRnE + .quad (0x0060007fc0000409) //511- 512GB (output address) Reserved - MAIR: Device-nGnRnE + +// level 2 table +// 512 entry +// 1 entry covers 2MB + .section PAGE_TABLE_2 + .text + .align 12 + .global pgtbl2 + +pgtbl2: + .quad (0x0000000000000401) // 0-2MB boot register only occupaies 4k! + .quad (0x0040000000200409) // 2-4MB Reseved -Execute never - MAIR: Device-nGnRnE + .quad (0x0040000000400409) // 4-6MB + .quad (0x0040000000600409) // 6-8MB + .quad (0x0040000000800409) // 8-10MB + .quad (0x0040000000a00409) // 10-12MB + .quad (0x0040000000c00409) // 12-14MB + .quad (0x0040000000e00409) // 14-16MB + .quad (0x0040000001000409) // 16-18MB + .quad (0x0040000001200409) // 18-20MB + .quad (0x0040000001400409) // 20-22MB + .quad (0x0040000001600409) // 22-24MB + .quad (0x0040000001800409) // 24-26MB + .quad (0x0040000001a00409) // 26-28MB + .quad (0x0040000001c00409) // 28-30MB + .quad (0x0040000001e00409) // 30-32MB + .quad (0x0000000002000401) // 32-34MB Volatile Memory - Normal, Inner/Outer WB/WA/RA + .quad (0x0000000002200401) // 34-36MB + .quad (0x0000000002400401) // 36-38MB + .quad (0x0000000002600401) // 38-40MB + .quad (0x0000000002800401) // 40-42MB + .quad (0x0000000002a00401) // 42-44MB + .quad (0x0000000002c00401) // 44-46MB + .quad (0x0000000002e00401) // 46-48MB + .quad (0x0000000003000401) // 48-50MB + .quad (0x0000000003200401) // 50-52MB + .quad (0x0000000003400401) // 52-54MB + .quad (0x0000000003600401) // 54-56MB + .quad (0x0000000003800401) // 56-58MB + .quad (0x0000000003a00401) // 58-60MB + .quad (0x0000000003c00401) // 60-62MB + .quad (0x0000000003e00401) // 62-64MB + .quad (0x0040000004000409) // 64-66MB Reserved + .quad (0x0040000004200409) // 66-68MB + .quad (0x0040000004400409) // 68-70MB + .quad (0x0040000004600409) // 70-72MB + .quad (0x0040000004800409) // 72-74MB + .quad (0x0040000004a00409) // 74-76MB + .quad (0x0040000004c00409) // 76-78MB + .quad (0x0040000004e00409) // 78-80MB + .quad (0x0040000005000409) // 80-82MB + .quad (0x0040000005200409) // 82-84MB + .quad (0x0040000005400409) // 84-86MB + .quad (0x0040000005600409) // 86-88MB + .quad (0x0040000005800409) // 88-90MB + .quad (0x0040000005a00409) // 90-92MB + .quad (0x0040000005c00409) // 92-94MB + .quad (0x0040000005e00409) // 94-96MB + .quad (0x0040000006000409) // 96-98MB + .quad (0x0040000006200409) // 98-100MB + .quad (0x0040000006400409) // 100-102MB + .quad (0x0040000006600409) // 102-104MB + .quad (0x0040000006800409) // 104-106MB + .quad (0x0040000006a00409) // 106-108MB + .quad (0x0040000006c00409) // 108-110MB + .quad (0x0040000006e00409) // 110-112MB + .quad (0x0040000007000409) // 112-114MB + .quad (0x0040000007200409) // 114-116MB + .quad (0x0040000007400409) // 116-118MB + .quad (0x0040000007600409) // 118-120MB + .quad (0x0040000007800409) // 120-122MB + .quad (0x0040000007a00409) // 122-124MB + .quad (0x0040000007c00409) // 124-126MB + .quad (0x0040000007e00409) // 126-128MB + .quad (0x0000000008000401) // 128-130MB Non_volatile Memory + .quad (0x0000000008200401) // 130-132MB + .quad (0x0000000008400401) // 132-134MB + .quad (0x0000000008600401) // 134-136MB + .quad (0x0000000008800401) // 136-138MB + .quad (0x0000000008a00401) // 138-140MB + .quad (0x0000000008c00401) // 
140-142MB + .quad (0x0000000008e00401) // 142-144MB + .quad (0x0000000009000401) // 144-146MB + .quad (0x0000000009200401) // 146-148MB + .quad (0x0000000009400401) // 148-150MB + .quad (0x0000000009600401) // 150-152MB + .quad (0x0000000009800401) // 152-154MB + .quad (0x0000000009a00401) // 154-156MB + .quad (0x0000000009c00401) // 156-158MB + .quad (0x0000000009e00401) // 158-160MB + .quad (0x000000000a000401) // 160-162MB + .quad (0x000000000a200401) // 162-164MB + .quad (0x000000000a400401) // 164-166MB + .quad (0x000000000a600401) // 166-168MB + .quad (0x000000000a800401) // 168-170MB + .quad (0x000000000aa00401) // 170-172MB + .quad (0x000000000ac00401) // 172-174MB + .quad (0x000000000ae00401) // 174-176MB + .quad (0x000000000b000401) // 176-178MB + .quad (0x000000000b200401) // 178-180MB + .quad (0x000000000b400401) // 180-182MB + .quad (0x000000000b600401) // 182-184MB + .quad (0x000000000b800401) // 184-186MB + .quad (0x000000000ba00401) // 186-188MB + .quad (0x000000000bc00401) // 188-190MB + .quad (0x000000000be00401) // 190-192MB + .quad (0x000000000c000401) // 192-194MB + .quad (0x000000000c200401) // 194-196MB + .quad (0x000000000c400401) // 196-198MB + .quad (0x000000000c600401) // 198-200MB + .quad (0x000000000c800401) // 200-202MB + .quad (0x000000000ca00401) // 202-204MB + .quad (0x000000000cc00401) // 204-206MB + .quad (0x000000000ce00401) // 206-208MB + .quad (0x000000000d000401) // 208-210MB + .quad (0x000000000d200401) // 210-212MB + .quad (0x000000000d400401) // 212-214MB + .quad (0x000000000d600401) // 214-216MB + .quad (0x000000000d800401) // 216-218MB + .quad (0x000000000da00401) // 218-220MB + .quad (0x000000000dc00401) // 220-222MB + .quad (0x000000000de00401) // 222-224MB + .quad (0x000000000e000401) // 224-226MB + .quad (0x000000000e200401) // 226-228MB + .quad (0x000000000e400401) // 228-230MB + .quad (0x000000000e600401) // 230-232MB + .quad (0x000000000e800401) // 232-234MB + .quad (0x000000000ea00401) // 234-236MB + .quad (0x000000000ec00401) // 236-238MB + .quad (0x000000000ee00401) // 238-240MB + .quad (0x000000000f000401) // 240-242MB + .quad (0x000000000f200401) // 242-244MB + .quad (0x000000000f400401) // 244-246MB + .quad (0x000000000f600401) // 246-248MB + .quad (0x000000000f800401) // 248-250MB + .quad (0x000000000fa00401) // 250-252MB + .quad (0x000000000fc00401) // 252-254MB + .quad (0x000000000fe00401) // 254-256MB + .quad (0x0000000010000409) // 256-258MB Debug + .quad (0x0040000010200409) // 258-260MB + .quad (0x0040000010400409) // 260-262MB + .quad (0x0040000010600409) // 262-264MB + .quad (0x0040000010800409) // 264-266MB + .quad (0x0040000010a00409) // 266-268MB + .quad (0x0040000010c00409) // 268-270MB + .quad (0x0040000010e00409) // 270-272MB + .quad (0x0040000011000409) // 272-274MB + .quad (0x0040000011200409) // 274-276MB + .quad (0x0040000011400409) // 276-278MB + .quad (0x0040000011600409) // 278-280MB + .quad (0x0040000011800409) // 280-282MB + .quad (0x0040000011a00409) // 282-284MB + .quad (0x0040000011c00409) // 284-286MB + .quad (0x0040000011e00409) // 286-288MB + .quad (0x0040000012000409) // 288-290MB + .quad (0x0040000012200409) // 290-292MB + .quad (0x0040000012400409) // 292-294MB + .quad (0x0040000012600409) // 294-296MB + .quad (0x0040000012800409) // 296-298MB + .quad (0x0040000012a00409) // 298-300MB + .quad (0x0040000012c00409) // 300-302MB + .quad (0x0040000012e00409) // 302-304MB + .quad (0x0040000013000409) // 304-306MB + .quad (0x0040000013200409) // 306-308MB + .quad (0x0040000013400409) // 308-310MB + .quad 
(0x0040000013600409) // 310-312MB + .quad (0x0040000013800409) // 312-314MB + .quad (0x0040000013a00409) // 314-316MB + .quad (0x0040000013c00409) // 316-318MB + .quad (0x0040000013e00409) // 318-320MB + .quad (0x0040000014000409) // 320-322MB + .quad (0x0040000014200409) // 322-324MB + .quad (0x0040000014400409) // 324-326MB + .quad (0x0040000014600409) // 326-328MB + .quad (0x0040000014800409) // 328-330MB + .quad (0x0040000014a00409) // 330-332MB + .quad (0x0040000014c00409) // 332-334MB + .quad (0x0040000014e00409) // 334-336MB + .quad (0x0040000015000409) // 336-338MB + .quad (0x0040000015200409) // 338-340MB + .quad (0x0040000015400409) // 340-342MB + .quad (0x0040000015600409) // 342-344MB + .quad (0x0040000015800409) // 344-346MB + .quad (0x0040000015a00409) // 346-348MB + .quad (0x0040000015c00409) // 348-350MB + .quad (0x0040000015e00409) // 350-352MB + .quad (0x0040000016000409) // 352-354MB + .quad (0x0040000016200409) // 354-356MB + .quad (0x0040000016400409) // 356-358MB + .quad (0x0040000016600409) // 358-360MB + .quad (0x0040000016800409) // 360-362MB + .quad (0x0040000016a00409) // 362-364MB + .quad (0x0040000016c00409) // 364-366MB + .quad (0x0040000016e00409) // 366-368MB + .quad (0x0040000017000409) // 368-370MB + .quad (0x0040000017200409) // 370-372MB + .quad (0x0040000017400409) // 372-374MB + .quad (0x0040000017600409) // 374-376MB + .quad (0x0040000017800409) // 376-378MB + .quad (0x0040000017a00409) // 378-380MB + .quad (0x0040000017c00409) // 380-382MB + .quad (0x0040000017e00409) // 382-384MB + .quad (0x0040000018000409) // 384-386MB + .quad (0x0040000018200409) // 386-388MB + .quad (0x0040000018400409) // 388-390MB + .quad (0x0040000018600409) // 390-392MB + .quad (0x0040000018800409) // 392-394MB + .quad (0x0040000018a00409) // 394-396MB + .quad (0x0040000018c00409) // 396-398MB + .quad (0x0040000018e00409) // 398-400MB + .quad (0x0040000019000409) // 400-402MB + .quad (0x0040000019200409) // 402-404MB + .quad (0x0040000019400409) // 404-406MB + .quad (0x0040000019600409) // 406-408MB + .quad (0x0040000019800409) // 408-410MB + .quad (0x0040000019a00409) // 410-412MB + .quad (0x0040000019c00409) // 412-414MB + .quad (0x0040000019e00409) // 414-416MB + .quad (0x004000001a000409) // 416-418MB Host Preipherials + .quad (0x004000001a200409) // 418-420MB + .quad (0x004000001a400409) // 420-422MB + .quad (0x004000001a600409) // 422-424MB + .quad (0x004000001a800409) // 424-426MB + .quad (0x004000001aa00409) // 426-428MB + .quad (0x004000001ac00409) // 428-430MB + .quad (0x004000001ae00409) // 430-432MB + .quad (0x004000001b000409) // 432-434MB + .quad (0x004000001b200409) // 434-436MB + .quad (0x004000001b400409) // 436-438MB + .quad (0x004000001b600409) // 438-440MB + .quad (0x004000001b800409) // 440-442MB + .quad (0x004000001ba00409) // 442-444MB + .quad (0x004000001bc00409) // 444-446MB + .quad (0x004000001be00409) // 446-448MB + .quad (0x004000001c000409) // 448-450MB + .quad (0x004000001c200409) // 450-452MB + .quad (0x004000001c400409) // 452-454MB + .quad (0x004000001c600409) // 454-456MB + .quad (0x004000001c800409) // 456-458MB + .quad (0x004000001ca00409) // 458-460MB + .quad (0x004000001cc00409) // 460-462MB + .quad (0x004000001ce00409) // 462-464MB + .quad (0x004000001d000409) // 464-466MB + .quad (0x004000001d200409) // 466-468MB + .quad (0x004000001d400409) // 468-470MB + .quad (0x004000001d600409) // 470-472MB + .quad (0x004000001d800409) // 472-474MB + .quad (0x004000001da00409) // 474-476MB + .quad (0x004000001dc00409) // 476-478MB + .quad 
(0x004000001de00409) // 478-480MB + .quad (0x004000001e000409) // 480-482MB + .quad (0x004000001e200409) // 482-484MB + .quad (0x004000001e400409) // 484-486MB + .quad (0x004000001e600409) // 486-488MB + .quad (0x004000001e800409) // 488-490MB + .quad (0x004000001ea00409) // 490-492MB + .quad (0x004000001ec00409) // 492-494MB + .quad (0x004000001ee00409) // 494-496MB + .quad (0x004000001f000409) // 496-498MB + .quad (0x004000001f200409) // 498-500MB + .quad (0x004000001f400409) // 500-502MB + .quad (0x004000001f600409) // 502-504MB + .quad (0x004000001f800409) // 504-506MB + .quad (0x004000001fa00409) // 506-508MB + .quad (0x004000001fc00409) // 508-510MB + .quad (0x004000001fe00409) // 510-512MB + .quad (0x0040000020000409) // 512-514MB + .quad (0x0040000020200409) // 514-516MB + .quad (0x0040000020400409) // 516-518MB + .quad (0x0040000020600409) // 518-520MB + .quad (0x0040000020800409) // 520-522MB + .quad (0x0040000020a00409) // 522-524MB + .quad (0x0040000020c00409) // 524-526MB + .quad (0x0040000020e00409) // 526-528MB + .quad (0x0040000021000409) // 528-530MB + .quad (0x0040000021200409) // 530-532MB + .quad (0x0040000021400409) // 532-534MB + .quad (0x0040000021600409) // 534-536MB + .quad (0x0040000021800409) // 536-538MB + .quad (0x0040000021a00409) // 538-540MB + .quad (0x0040000021c00409) // 540-542MB + .quad (0x0040000021e00409) // 542-544MB + .quad (0x0040000022000409) // 544-546MB + .quad (0x0040000022200409) // 546-548MB + .quad (0x0040000022400409) // 548-550MB + .quad (0x0040000022600409) // 550-552MB + .quad (0x0040000022800409) // 552-554MB + .quad (0x0040000022a00409) // 554-556MB + .quad (0x0040000022c00409) // 556-558MB + .quad (0x0040000022e00409) // 558-560MB + .quad (0x0040000023000409) // 560-562MB + .quad (0x0040000023200409) // 562-564MB + .quad (0x0040000023400409) // 564-566MB + .quad (0x0040000023600409) // 566-568MB + .quad (0x0040000023800409) // 568-570MB + .quad (0x0040000023a00409) // 570-572MB + .quad (0x0040000023c00409) // 572-574MB + .quad (0x0040000023e00409) // 574-576MB + .quad (0x0040000024000409) // 576-578MB + .quad (0x0040000024200409) // 578-580MB + .quad (0x0040000024400409) // 580-582MB + .quad (0x0040000024600409) // 582-584MB + .quad (0x0040000024800409) // 584-586MB + .quad (0x0040000024a00409) // 586-588MB + .quad (0x0040000024c00409) // 588-590MB + .quad (0x0040000024e00409) // 590-592MB + .quad (0x0040000025000409) // 592-594MB + .quad (0x0040000025200409) // 594-596MB + .quad (0x0040000025400409) // 596-598MB + .quad (0x0040000025600409) // 598-600MB + .quad (0x0040000025800409) // 600-602MB + .quad (0x0040000025a00409) // 602-604MB + .quad (0x0040000025c00409) // 604-606MB + .quad (0x0040000025e00409) // 606-608MB + .quad (0x0040000026000409) // 608-610MB + .quad (0x0040000026200409) // 610-612MB + .quad (0x0040000026400409) // 612-614MB + .quad (0x0040000026600409) // 614-616MB + .quad (0x0040000026800409) // 616-618MB + .quad (0x0040000026a00409) // 618-620MB + .quad (0x0040000026c00409) // 620-622MB + .quad (0x0040000026e00409) // 622-624MB + .quad (0x0040000027000409) // 624-626MB + .quad (0x0040000027200409) // 626-628MB + .quad (0x0040000027400409) // 628-630MB + .quad (0x0040000027600409) // 630-632MB + .quad (0x0040000027800409) // 632-634MB + .quad (0x0040000027a00409) // 634-636MB + .quad (0x0040000027c00409) // 636-638MB + .quad (0x0040000027e00409) // 638-640MB + .quad (0x0040000028000409) // 640-642MB + .quad (0x0040000028200409) // 642-644MB + .quad (0x0040000028400409) // 644-646MB + .quad (0x0040000028600409) // 
646-648MB + .quad (0x0040000028800409) // 648-650MB + .quad (0x0040000028a00409) // 650-652MB + .quad (0x0040000028c00409) // 652-654MB + .quad (0x0040000028e00409) // 654-656MB + .quad (0x0040000029000409) // 656-658MB + .quad (0x0040000029200409) // 658-660MB + .quad (0x0040000029400409) // 660-662MB + .quad (0x0040000029600409) // 662-664MB + .quad (0x0040000029800409) // 664-666MB + .quad (0x0040000029a00409) // 666-668MB + .quad (0x0040000029c00409) // 668-670MB + .quad (0x0040000029e00409) // 670-672MB + .quad (0x004000002a000409) // 672-674MB + .quad (0x004000002a200409) // 674-676MB + .quad (0x004000002a400409) // 676-678MB + .quad (0x004000002a600409) // 678-680MB + .quad (0x004000002a800409) // 680-682MB + .quad (0x004000002aa00409) // 682-684MB + .quad (0x004000002ac00409) // 684-686MB + .quad (0x004000002ae00409) // 686-688MB + .quad (0x004000002b000409) // 688-690MB + .quad (0x004000002b200409) // 690-692MB + .quad (0x004000002b400409) // 692-694MB + .quad (0x004000002b600409) // 694-696MB + .quad (0x004000002b800409) // 696-698MB + .quad (0x004000002ba00409) // 698-700MB + .quad (0x004000002bc00409) // 700-702MB + .quad (0x004000002be00409) // 702-704MB + .quad (0x004000002c000409) // 704-706MB + .quad (0x004000002c200409) // 706-708MB + .quad (0x004000002c400409) // 708-710MB + .quad (0x004000002c600409) // 710-712MB + .quad (0x004000002c800409) // 712-714MB + .quad (0x004000002ca00409) // 714-716MB + .quad (0x004000002cc00409) // 716-718MB + .quad (0x004000002ce00409) // 718-720MB + .quad (0x004000002d000409) // 720-722MB + .quad (0x004000002d200409) // 722-724MB + .quad (0x004000002d400409) // 724-726MB + .quad (0x004000002d600409) // 726-728MB + .quad (0x004000002d800409) // 728-730MB + .quad (0x004000002da00409) // 730-732MB + .quad (0x004000002dc00409) // 732-734MB + .quad (0x004000002de00409) // 734-736MB + .quad (0x004000002e000409) // 736-738MB + .quad (0x004000002e200409) // 738-740MB + .quad (0x004000002e400409) // 740-742MB + .quad (0x004000002e600409) // 742-744MB + .quad (0x004000002e800409) // 744-746MB + .quad (0x004000002ea00409) // 746-748MB + .quad (0x004000002ec00409) // 748-750MB + .quad (0x004000002ee00409) // 750-752MB + .quad (0x004000002f000409) // 752-754MB + .quad (0x004000002f200409) // 754-756MB + .quad (0x004000002f400409) // 756-758MB + .quad (0x004000002f600409) // 758-760MB + .quad (0x004000002f800409) // 760-762MB + .quad (0x004000002fa00409) // 762-764MB + .quad (0x004000002fc00409) // 764-766MB + .quad (0x004000002fe00409) // 766-768MB + .quad (0x0040000030000409) // 768-770MB + .quad (0x0040000030200409) // 770-772MB + .quad (0x0040000030400409) // 772-774MB + .quad (0x0040000030600409) // 774-776MB + .quad (0x0040000030800409) // 776-778MB + .quad (0x0040000030a00409) // 778-780MB + .quad (0x0040000030c00409) // 780-782MB + .quad (0x0040000030e00409) // 782-784MB + .quad (0x0040000031000409) // 784-786MB + .quad (0x0040000031200409) // 786-788MB + .quad (0x0040000031400409) // 788-790MB + .quad (0x0040000031600409) // 790-792MB + .quad (0x0040000031800409) // 792-794MB + .quad (0x0040000031a00409) // 794-796MB + .quad (0x0040000031c00409) // 796-798MB + .quad (0x0040000031e00409) // 798-800MB + .quad (0x0040000032000409) // 800-802MB + .quad (0x0040000032200409) // 802-804MB + .quad (0x0040000032400409) // 804-806MB + .quad (0x0040000032600409) // 806-808MB + .quad (0x0040000032800409) // 808-810MB + .quad (0x0040000032a00409) // 810-812MB + .quad (0x0040000032c00409) // 812-814MB + .quad (0x0040000032e00409) // 814-816MB + .quad 
(0x0040000033000409) // 816-818MB + .quad (0x0040000033200409) // 818-820MB + .quad (0x0040000033400409) // 820-822MB + .quad (0x0040000033600409) // 822-824MB + .quad (0x0040000033800409) // 824-826MB + .quad (0x0040000033a00409) // 826-828MB + .quad (0x0040000033c00409) // 828-830MB + .quad (0x0040000033e00409) // 830-832MB + .quad (0x0040000034000409) // 832-834MB + .quad (0x0040000034200409) // 834-836MB + .quad (0x0040000034400409) // 836-838MB + .quad (0x0040000034600409) // 838-840MB + .quad (0x0040000034800409) // 840-842MB + .quad (0x0040000034a00409) // 842-844MB + .quad (0x0040000034c00409) // 844-846MB + .quad (0x0040000034e00409) // 846-848MB + .quad (0x0040000035000409) // 848-850MB + .quad (0x0040000035200409) // 850-852MB + .quad (0x0040000035400409) // 852-854MB + .quad (0x0040000035600409) // 854-856MB + .quad (0x0040000035800409) // 856-858MB + .quad (0x0040000035a00409) // 858-860MB + .quad (0x0040000035c00409) // 860-862MB + .quad (0x0040000035e00409) // 862-864MB + .quad (0x0040000036000409) // 864-866MB + .quad (0x0040000036200409) // 866-868MB + .quad (0x0040000036400409) // 868-870MB + .quad (0x0040000036600409) // 870-872MB + .quad (0x0040000036800409) // 872-874MB + .quad (0x0040000036a00409) // 874-876MB + .quad (0x0040000036c00409) // 876-878MB + .quad (0x0040000036e00409) // 878-880MB + .quad (0x0040000037000409) // 880-882MB + .quad (0x0040000037200409) // 882-884MB + .quad (0x0040000037400409) // 884-886MB + .quad (0x0040000037600409) // 886-888MB + .quad (0x0040000037800409) // 888-890MB + .quad (0x0040000037a00409) // 890-892MB + .quad (0x0040000037c00409) // 892-894MB + .quad (0x0040000037e00409) // 894-896MB + .quad (0x0040000038000409) // 896-898MB + .quad (0x0040000038200409) // 898-900MB + .quad (0x0040000038400409) // 900-902MB + .quad (0x0040000038600409) // 902-904MB + .quad (0x0040000038800409) // 904-906MB + .quad (0x0040000038a00409) // 906-908MB + .quad (0x0040000038c00409) // 908-910MB + .quad (0x0040000038e00409) // 910-912MB + .quad (0x0040000039000409) // 912-914MB + .quad (0x0040000039200409) // 914-916MB + .quad (0x0040000039400409) // 916-918MB + .quad (0x0040000039600409) // 918-920MB + .quad (0x0040000039800409) // 920-922MB + .quad (0x0040000039a00409) // 922-924MB + .quad (0x0040000039c00409) // 924-926MB + .quad (0x0040000039e00409) // 926-928MB + .quad (0x004000003a000409) // 928-930MB + .quad (0x004000003a200409) // 930-932MB + .quad (0x004000003a400409) // 932-934MB + .quad (0x004000003a600409) // 934-936MB + .quad (0x004000003a800409) // 936-938MB + .quad (0x004000003aa00409) // 938-940MB + .quad (0x004000003ac00409) // 940-942MB + .quad (0x004000003ae00409) // 942-944MB + .quad (0x004000003b000409) // 944-946MB + .quad (0x004000003b200409) // 946-948MB + .quad (0x004000003b400409) // 948-950MB + .quad (0x004000003b600409) // 950-952MB + .quad (0x004000003b800409) // 952-954MB + .quad (0x004000003ba00409) // 954-956MB + .quad (0x004000003bc00409) // 956-958MB + .quad (0x004000003be00409) // 958-960MB + .quad (0x004000003c000409) // 960-962MB + .quad (0x004000003c200409) // 962-964MB + .quad (0x004000003c400409) // 964-966MB + .quad (0x004000003c600409) // 966-968MB + .quad (0x004000003c800409) // 968-970MB + .quad (0x004000003ca00409) // 970-972MB + .quad (0x004000003cc00409) // 972-974MB + .quad (0x004000003ce00409) // 974-976MB + .quad (0x004000003d000409) // 976-978MB + .quad (0x004000003d200409) // 978-980MB + .quad (0x004000003d400409) // 980-982MB + .quad (0x004000003d600409) // 982-984MB + .quad (0x004000003d800409) // 
984-986MB
+ .quad (0x004000003da00409) // 986-988MB
+ .quad (0x004000003dc00409) // 988-990MB
+ .quad (0x004000003de00409) // 990-992MB
+ .quad (0x004000003e000409) // 992-994MB
+ .quad (0x004000003e200409) // 994-996MB
+ .quad (0x004000003e400409) // 996-998MB
+ .quad (0x004000003e600409) // 998-1000MB
+ .quad (0x004000003e800409) // 1000-1002MB
+ .quad (0x004000003ea00409) // 1002-1004MB
+ .quad (0x004000003ec00409) // 1004-1006MB
+ .quad (0x004000003ee00409) // 1006-1008MB
+ .quad (0x004000003f000409) // 1008-1010MB
+ .quad (0x004000003f200409) // 1010-1012MB
+ .quad (0x004000003f400409) // 1012-1014MB
+ .quad (0x004000003f600409) // 1014-1016MB
+ .quad (0x004000003f800409) // 1016-1018MB
+ .quad (0x004000003fa00409) // 1018-1020MB
+ .quad (0x004000003fc00409) // 1020-1022MB
+ .quad (0x004000003fe00409) // 1022-1024MB
+
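The translation tables above are plain arrays of 64-bit block descriptors: bits[1:0] = 0b01 mark a block entry, bits[4:2] select a MAIR attribute slot, bit[10] is the Access Flag, bits 53/54 are the PXN/UXN execute-never bits, and the upper address bits give the 1GB or 2MB output address. When editing the memory map, a small helper of this shape can be used to sanity-check the constants in PAGE_TABLE_2. This is a sketch only: `l2_block_desc` is not part of the repository, and the MAIR slot assignments (Attr0 = Normal WB/WA/RA, Attr2 = Device-nGnRnE) are inferred from the comments in the table.

```
#include <stdint.h>
#include <assert.h>

/* Hypothetical helper: build a level 2 (2MB) block descriptor for the 4KB granule. */
static uint64_t l2_block_desc(uint64_t pa, unsigned attr_idx, int xn)
{
    uint64_t d = 0x1ULL;                    /* bits[1:0] = 0b01 : block entry            */
    d |= (uint64_t)(attr_idx & 0x7u) << 2;  /* bits[4:2]        : MAIR AttrIndx          */
    d |= 1ULL << 10;                        /* bit[10]          : Access Flag            */
    d |= pa & 0x0000FFFFFFE00000ULL;        /* bits[47:21]      : 2MB-aligned output PA  */
    if (xn)
        d |= 1ULL << 54;                    /* bit[54]          : execute never (UXN/XN) */
    return d;
}

int main(void)
{
    /* 32-34MB volatile memory: Normal, AttrIndx 0, executable */
    assert(l2_block_desc(32u << 20, 0, 0) == 0x0000000002000401ULL);
    /* 64-66MB reserved: Device-nGnRnE, AttrIndx 2, execute never */
    assert(l2_block_desc(64u << 20, 2, 1) == 0x0040000004000409ULL);
    return 0;
}
```

The level 1 table follows the same encoding but uses bits[47:30] for a 1GB output address and additionally sets bit[53] (PXN) on its reserved regions, which is why those entries start with 0x0060 rather than 0x0040.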
state async abort +void sync_ext_abort_handler(void) +{ + abort_handler(); +} + +// Enable instruction cache in aarch64 state (EL3) +void enable_icaches(void){ + + __ASM volatile ( "ic iallu \n\t" //; Invalidate I cache and BTAC + "dsb sy \n\t" + "isb \n\t" + "mrs x0, sctlr_el3 \n\t" + "mov x1, #0x1000 \n\t" //; Turn on caches + "orr x0, x0, x1 \n\t" + "msr SCTLR_EL3, x0 \n\t" + "isb \n\t" ::: "x0", "x1"); + +} + +// Disable caches in aarch64 state (EL3) +void disable_caches(void){ + __ASM volatile( + "mrs x0, sctlr_el3 \n\t" + "mov x1, #0x1004 \n\t" //; Turn on caches + "BIC x0, x0, x1 \n\t" + "msr SCTLR_EL3, x0 \n\t" + "ISB \n\t" ::: "x0", "x1"); +} + +/**----------------------------------------------------------------------------- + Function name : disable_smp + Input Parameters : void + Return Type : void + Descritpion : Disables SMP bit, only applicable for v8 Cores +------------------------------------------------------------------------------*/ +void disable_smp(void) +{ + __ASM volatile ( "mov x1, #0x40 \n\t" + "mrs x0, S3_1_c15_c2_1 \n\t" + "bic x0, x0, x1 \n\t" + "msr S3_1_c15_c2_1, x0 \n\t" :::"x0", "x1"); +} + +void invalidate_l2_caches(void){ +// invalidate_caches(); +//Programmer's Guide for ARMv8-A, +//Example 11-3 Cleaning to Point of Coherency +__ASM volatile ( "MRS X0, CLIDR_EL1 \n\t" + "AND W3, W0, #0x07000000 \n\t" // Get 2 x Level of Coherence + "LSR W3, W3, #23 \n\t" + "CBZ W3, Finished \n\t" + "MOV W10, #0 \n\t" // W10 = 2 x cache level + "MOV W8, #1 \n\t" // W8 = constant 0b1 + "Loop1: ADD W2, W10, W10, LSR #1 \n\t"// Calculate 3 x cache level + "LSR W1, W0, W2 \n\t" // extract 3-bit cache type for this level + "AND W1, W1, #0x7 \n\t" + "CMP W1, #2 \n\t" + "BLT Skip \n\t" // No data or unified cache at this level + "MSR CSSELR_EL1, X10 \n\t" // Select this cache level + "ISB \n\t" // Synchronize change of CSSELR + "MRS X1, CCSIDR_EL1 \n\t" // Read CCSIDR + "AND W2, W1, #7 \n\t" // W2 = log2(linelen)-4 + "ADD W2, W2, #4 \n\t" // W2 = log2(linelen) + "UBFX W4, W1, #3, #10 \n\t" // W4 = max way number, right aligned + "CLZ W5, W4 \n\t" /* W5 = 32-log2(ways), bit position of way in DC operand */ + "LSL W9, W4, W5 \n\t" /* W9 = max way number, aligned to position in DC operand */ + "LSL W16, W8, W5 \n\t" // W16 = amount to decrement way number per iteration + "Loop2: UBFX W7, W1, #13, #15 \n\t" // W7 = max set number, right aligned + "LSL W7, W7, W2 \n\t" /* W7 = max set number, aligned to position in DC operand */ + "LSL W17, W8, W2 \n\t" // W17 = amount to decrement set number per iteration + "Loop3: ORR W11, W10, W9 \n\t" // W11 = combine way number and cache number... + "ORR W11, W11, W7 \n\t" // ... 
and set number for DC operand + "DC CSW, X11 \n\t" // Do data cache clean by set and way + "SUBS W7, W7, W17 \n\t" // Decrement set number + "BGE Loop3 \n\t" + "SUBS X9, X9, X16 \n\t" // Decrement way number + "BGE Loop2 \n\t" + "Skip: ADD W10, W10, #2 \n\t" // Increment 2 x cache level + "CMP W3, W10 \n\t" + "DSB sy \n\t" /* Ensure completion of previous cache maintenance operation */ + "BGT Loop1 \n\t" + "Finished: \n\t":::"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12"); +} + + +uint32_t get_sctlr_val(void){ + uint32_t sctlr_val; + __ASM volatile("mrs %x[output], sctlr_el3\n\t" + :[output] "=r" (sctlr_val)::); + + return sctlr_val; +} + + + + +void write_sctlr_val(uint32_t sctlr_val){ + + __ASM volatile("msr SCTLR_EL3, %x[input] \n\t" + "dsb SY \n\t" + "isb \n\t" ::[input] "r" (sctlr_val):); + +} + +/**----------------------------------------------------------------------------- + Function name : clear_OS_LOCK + Input Parameters : void + Return Type : void + Descritpion : Set OS lock to 0 + ------------------------------------------------------------------------------*/ +void clear_OS_LOCK(void){ + + __ASM volatile ( "MOV x0, 0x0 \n\t" + "MSR OSLAR_EL1, x0 \n\t" ::: "x0"); + +} + diff --git a/software/lib/sw_lib/apps/src/system.c b/software/lib/sw_lib/apps/src/system.c new file mode 100755 index 0000000000000000000000000000000000000000..28de726c15b5cfa0008fe833a6f27e530af2a83d --- /dev/null +++ b/software/lib/sw_lib/apps/src/system.c @@ -0,0 +1,339 @@ +/*------------------------------------------------------------------------------ + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. 
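/*
 * A minimal sketch of how the SCTLR_EL3 helpers above can be used together
 * with the cache-enable bits that enable_icaches() and disable_caches()
 * manipulate. Bit positions follow the architectural SCTLR_EL3 layout
 * (M = bit 0, C = bit 2, I = bit 12); the immediates 0x1000 and 0x1004 in
 * the assembly above correspond to I and to I|C, which disable_caches()
 * clears.
 */
#include <stdint.h>

extern uint32_t get_sctlr_val(void);
extern void     write_sctlr_val(uint32_t sctlr_val);

#define SCTLR_EL3_M  (1u << 0)   /* MMU enable                */
#define SCTLR_EL3_C  (1u << 2)   /* data/unified cache enable */
#define SCTLR_EL3_I  (1u << 12)  /* instruction cache enable  */

static void enable_dcache_via_helpers(void)
{
    uint32_t sctlr = get_sctlr_val();       /* read current SCTLR_EL3                */
    write_sctlr_val(sctlr | SCTLR_EL3_C);   /* set C; write_sctlr_val() adds DSB/ISB */
}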
+ * + * Release Information : SSE710-r0p0-00rel0 + * + *------------------------------------------------------------------------------ + */ + +#include <stdio.h> +#include <stdarg.h> +#include "system.h" +#include "platform.h" +#include "uart_stdout.h" +#include "sys_memory_map.h" +#include "intrinsics.h" + + +// void irq_handler(){ +// int source, interrupt, raw_interrupt; + +// } + +/** @file system.c + * @brief System Print, TEST_PASS/TEST_FAIL and placeholder for other helper functions + */ + +/* ----------------------------------------------------------------------------- + * Default exception handler function + * ----------------------------------------------------------------------------- + */ + +/* Abort */ +__attribute__((weak)) void abort_handler (void) { + printf ("Unexpected abort exception.\n"); + TEST_FAIL(); +} + +/* ----------------------------------------------------------------------------- + * C library retarget + * ----------------------------------------------------------------------------- + */ + +#ifndef SEMIHOST +__ASM(".global __use_no_semihosting"); +__ASM(".global __use_no_heap"); + +/* +* These must be defined to avoid linking in stdio.o from the +* C Library +*/ + +struct __FILE { int handle; /* Add whatever you need here */}; +FILE __stdout; +FILE __stdin; +FILE __stdout; + +/* +* __backspace must return the last char read to the stream +* fgetc() needs to keep a record of whether __backspace was +* called directly before it +*/ +int last_char_read; +int backspace_called; + + +/** @brief fuptc, print redirction function, characters written to UART or TBENCH component + * @return int + */ + +int fputc(int ch, FILE* f) { + unsigned char tempch = ch; + UartPutc(ch); + return ch; +} + +#ifdef __ARMCC_VERSION__ +/** @brief ferror, Empty function + * @return int + */ + +int ferror(FILE* f) { + return EOF; +} +#endif + +/** @brief sys_exit, Overrides sys_exit to print ^D (EOT) to the Tube to end the simulation + * @return int + */ + +void _sys_exit(int return_code) { + __wfi(); +} + +/** @brief _sys_command_string, Required for RVCT6 + * @return char * + */ + +char *_sys_command_string(char *cmd, int len) +{ + return NULL; +} + +/** @brief _ttywrch, Required for RVCT6 + * @return void + */ + +void _ttywrch(int ch) +{ + +} + +/** @brief __backspace, Required for RVCT6 + * @return int + */ + +int __backspace(FILE *f) +{ + backspace_called = 0X1; + return 1; +} + +#endif + + + +/* ----------------------------------------------------------------------------- + * Fast printing functions + * ----------------------------------------------------------------------------- + */ + +/** @brief c_print, This is fast print function where checks for % in input string, + * if no % found, then calls c_print_str to save execution time of + * full print function. + * @return int + */ + +int c_print(const char * fmt, ...) 
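/*
 * With the retargeting above, the standard C output path ends at the UART:
 * printf()/puts() reach fputc(), which forwards each character to UartPutc()
 * from uart_stdout.h. A minimal sketch, assuming UartStdOutInit() is the
 * initialisation routine provided by that header (an assumption; only
 * UartPutc() is visible in this diff):
 */
#include <stdio.h>

extern void UartStdOutInit(void);   /* assumed UART init routine from uart_stdout.h */

int retarget_demo(void)
{
    UartStdOutInit();                             /* bring up the UART behind fputc()          */
    printf("hello from the retargeted libc\n");   /* each character passes through UartPutc()  */
    return 0;
}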
{ + va_list args; + int tmp, count = 0; + char buffer[160]; + const char *parse_str = fmt; + int flag = 0; + + while ((*parse_str != '\0') || (count++)) { + if ((*parse_str == '%') || (count >= 160)) { + if (count == 160) { + c_print_str("String too long for c_print function\n"); + } + flag = 1; + break; + } + parse_str++; + } + + if (flag == 0) { + c_print_str(fmt); + return 0; + } + + va_start(args,fmt); + tmp = vsprintf(buffer, fmt, args); + va_end(args); + + count = 0; + do{ + UartPutc((unsigned int)buffer[count]); + count++; + } while(count<79 && buffer[count]!='\0'); + + return tmp; +} + + +/** @brief c_print_char, Fast print function to print char on UART + * @return void + */ + +void c_print_char(const char ch) +{ + UartPutc(ch); +} + + +/** @brief c_print_str, Fast print function to print char on UART + * @return int + */ + +int c_print_str(const char * fmt) { + int i = 0; + do{ + UartPutc(fmt[i]); + i++; + } while(i<79 && fmt[i]!='\0'); + + return 1; +} + + +/* ----------------------------------------------------------------------------- + * Default test pass fail functions + * ----------------------------------------------------------------------------- + */ + +/** @brief TEST_PASS, Terminates Test by printing test pass message + * @return void + */ + +__attribute__((weak)) void TEST_PASS(void) { + // Halt simulation + char *tube_addr = (char *)SYS_UART0_BASE; + c_print_str("TEST PASSED OK\n"); + *tube_addr = (char )4; + __wfi(); + +} + +/** @brief TEST_FAIL, Terminates Test by printing test FAIL message + * @return void + */ + +__attribute__((weak)) void TEST_FAIL(void) { + // Halt simulation + char *tube_addr = (char *)SYS_UART0_BASE; + c_print_str("TEST FAILED\n"); + *tube_addr = (char )4; + __wfi(); + +} + +/** @brief access_addr, function to read/write num_locations from base address + * @return int + */ + +int access_addr(unsigned long int base_address, unsigned int num_accesses) +{ + volatile unsigned int read_data; + volatile unsigned int i; + volatile unsigned int offset; + volatile unsigned int write_data; + volatile unsigned int error_count = 0; + + offset = 0x0; + write_data = 0xA5A50000; + + for(i = 0; i < num_accesses; i++) { + + printf("Writing address \n"); + + *(volatile unsigned long *)base_address = write_data; + + printf("Reading address \n"); + read_data = *(volatile unsigned long *)base_address; + if (read_data != write_data) { + printf("Read value does not match Written value\n"); + error_count++; + } + else { + printf("Read the right value\n"); + } + + offset += 0x4; + write_data++; + } + + return(error_count); +} + +/** @brief access_addr_wdata, function to read/write num_locations from base addres with given data + * @return int + */ + +int access_addr_wdata(unsigned long int base_address, unsigned int num_accesses, unsigned int write_data) +{ + volatile unsigned int read_data; + volatile unsigned int i; + volatile unsigned int offset; + volatile unsigned int error_count = 0; + + offset = 0x0; + + for(i = 0; i < num_accesses; i++) { + + printf("WA \n"); + + *(volatile unsigned long *)base_address = write_data; + + __dsb(0xf); + __sev(); + + printf("RA \n"); + read_data = *(volatile unsigned long *)base_address; + if (read_data != write_data) { + printf("No MA \n"); + error_count++; + } + else { + printf("MA \n"); + } + + offset += 0x4; + write_data++; + } + + return(error_count); +} + + +/** @brief access_addr_writes, function to read/write num_locations from base addres with given data + * @return int + */ + +void access_addr_writes(unsigned long int 
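/*
 * Minimal usage sketch for the helpers above: exercise a region with
 * access_addr() and report the outcome through TEST_PASS()/TEST_FAIL().
 * Note that access_addr() and access_addr_wdata() increment their local
 * 'offset' variable but never add it to base_address, so as written every
 * iteration targets the same location with incrementing data. The base
 * address below is a placeholder; real bases come from sys_memory_map.h.
 */
extern int  access_addr(unsigned long int base_address, unsigned int num_accesses);
extern void TEST_PASS(void);
extern void TEST_FAIL(void);

#define DEMO_RAM_BASE 0x80000000UL   /* placeholder address, not taken from the memory map */

void run_simple_ram_check(void)
{
    if (access_addr(DEMO_RAM_BASE, 16) == 0)   /* 16 write/read-back passes, 0 errors expected */
        TEST_PASS();
    else
        TEST_FAIL();
}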
base_address, unsigned int num_accesses, unsigned int write_data) +{ + volatile unsigned int i; + volatile unsigned int offset; + + offset = 0x0; + + for(i = 0; i < num_accesses; i++) { + + printf("WA \n"); + + *(volatile unsigned long *)base_address = write_data; + + offset += 0x4; + write_data++; + } +} + diff --git a/software/lib/sw_lib/apps/src/vect_64.s b/software/lib/sw_lib/apps/src/vect_64.s new file mode 100755 index 0000000000000000000000000000000000000000..c30ce7f7f703a7b24a537302ca889151ef2f4db2 --- /dev/null +++ b/software/lib/sw_lib/apps/src/vect_64.s @@ -0,0 +1,180 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- +// Purpose : Exception vector +// ----------------------------------------------------------------------------- + + .section VECTORS + .text + .balign 2048 + + .global monitor_vectors + + +// PADDING + .macro pad_zero from=0, to=16 + .word 0 + .if \to-\from + pad_zero "(\from+1)",\to + .endif + .endm + + .macro SAVE_X1_X30 + STP x29, x30, [sp, #-0x10]! + STP x27, x28, [sp, #-0x10]! + STP x25, x26, [sp, #-0x10]! + STP x23, x24, [sp, #-0x10]! + STP x21, x22, [sp, #-0x10]! + STP x19, x20, [sp, #-0x10]! + STP x17, x18, [sp, #-0x10]! + STP x15, x16, [sp, #-0x10]! + STP x13, x14, [sp, #-0x10]! + STP x11, x12, [sp, #-0x10]! + STP x9, x10, [sp, #-0x10]! + STP x7, x8, [sp, #-0x10]! + STP x5, x6, [sp, #-0x10]! + STP x3, x4, [sp, #-0x10]! + STP x1, x2, [sp, #-0x10]! + .endm + + .macro RESTORE_X1_X30 + LDP x1, x2, [sp], #0x10 + LDP x3, x4, [sp], #0x10 + LDP x5, x6, [sp], #0x10 + LDP x7, x8, [sp], #0x10 + LDP x9, x10, [sp], #0x10 + LDP x11, x12, [sp], #0x10 + LDP x13, x14, [sp], #0x10 + LDP x15, x16, [sp], #0x10 + LDP x17, x18, [sp], #0x10 + LDP x19, x20, [sp], #0x10 + LDP x21, x22, [sp], #0x10 + LDP x23, x24, [sp], #0x10 + LDP x25, x26, [sp], #0x10 + LDP x27, x28, [sp], #0x10 + LDP x29, x30, [sp], #0x10 + .endm + + //================================================================== +// EL3 VECTOR TABLE +//================================================================== + +monitor_vectors : + B . // Current EL 32bits: Synchronous + .balign 128 + B . //IRQ/vIRQ + .balign 128 + B . //FIQ/vFIQ + .balign 128 + B . // //Error/vError + .balign 128 + + + + //B . // Current EL 64bits: Synchronous + b sync_abort_handler + .balign 128 + B el1_irq_handler // IRQ/vIRQ + .balign 128 + B el1_irq_handler //FIQ/vFIQ + .balign 128 + B sync_abort_handler //Error/vError + + .balign 128 + B . //Lower EL SPx: //Synchronous + .balign 128 + B el1_irq_handler //IRQ/vIRQ + .balign 128 + B . //FIQ/vFIQ + .balign 128 + B . //Error/vError + + .balign 128 + B . //Lower EL SP0: Synchronous + .balign 128 + B . //IRQ/vIRQ + .balign 128 + B . //FIQ/vFIQ + .balign 128 + B . 
//Error/vError + + +// + .type el1_irq_handler, @function +el1_irq_handler: + + SAVE_X1_X30 + + mrs x1, sp_el0 + mrs x2, elr_el1 + mrs x3, spsr_el1 + + // save x0 and stack pointer + stp x1, x0, [sp, #-0x10]! + + // save elr and spsr + stp x2, x3, [sp, #-0x10]! + + bl irq_handler + + ldp x0, x1, [sp], #0x10 + + msr elr_el1, x0 + msr spsr_el1, x1 + + ldp x1, x0, [sp], #0x10 + msr sp_el0, x1 + + RESTORE_X1_X30 + + eret + + +sync_abort_handler : + SAVE_X1_X30 + + mrs x1, sp_el0 + mrs x2, elr_el1 + mrs x3, spsr_el1 + + // save x0 and stack pointer + stp x1, x0, [sp, #-0x10]! + + // save elr and spsr + stp x2, x3, [sp, #-0x10]! + + bl sync_ext_abort_handler + + ldp x0, x1, [sp], #0x10 + + msr elr_el1, x0 + msr spsr_el1, x1 + + ldp x1, x0, [sp], #0x10 + msr sp_el0, x1 + + mrs x1, elr_el3 + add x1, x1, #0x4 + msr elr_el3, x1 + + RESTORE_X1_X30 + + eret + + +// Padding + pad_zero 0,16 + pad_zero 0,16 diff --git a/software/lib/sw_lib/common/include/host_chassis_control.h b/software/lib/sw_lib/common/include/host_chassis_control.h new file mode 100755 index 0000000000000000000000000000000000000000..fbca58b6d15b6fe06788f13e8653f7c2f36b7004 --- /dev/null +++ b/software/lib/sw_lib/common/include/host_chassis_control.h @@ -0,0 +1,599 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
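/*
 * The table above only takes effect once VBAR_EL3 points at monitor_vectors.
 * A minimal sketch of how startup code could install it, in the same
 * inline-assembly style as platform.c (the required 2KB alignment comes from
 * the .balign 2048 directive in vect_64.s):
 */
extern char monitor_vectors[];   /* label exported by vect_64.s */

static void install_el3_vectors(void)
{
    __asm__ volatile("msr vbar_el3, %0 \n\t"
                     "isb              \n\t"
                     :
                     : "r"(monitor_vectors)
                     : "memory");
}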
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#include "global_defines.h" + +#ifndef HOST_CHASSIS_CTRL_DEF_H +#define HOST_CHASSIS_CTRL_DEF_H + +#define HOST_CHASSIS_CTRL_RES0_SIZE 3 +#define HOST_CHASSIS_CTRL_RES1_SIZE 1 +#define HOST_CHASSIS_CTRL_RES2_SIZE 1 +#define HOST_CHASSIS_CTRL_RES3_SIZE 1 +#define HOST_CHASSIS_CTRL_RES4_SIZE 109 +#define HOST_CHASSIS_CTRL_RES5_SIZE 63 +#define HOST_CHASSIS_CTRL_RES6_SIZE 2 +#define HOST_CHASSIS_CTRL_RES7_SIZE 52 +#define HOST_CHASSIS_CTRL_RES8_SIZE 62 +#define HOST_CHASSIS_CTRL_RES9_SIZE 189 +#define HOST_CHASSIS_CTRL_RES10_SIZE 1 +#define HOST_CHASSIS_CTRL_RES11_SIZE 2 +#define HOST_CHASSIS_CTRL_RES12_SIZE 2 +#define HOST_CHASSIS_CTRL_RES13_SIZE 2 +#define HOST_CHASSIS_CTRL_RES14_SIZE 2 +#define HOST_CHASSIS_CTRL_RES15_SIZE 59 +#define HOST_CHASSIS_CTRL_RES16_SIZE 307 +#define HOST_CHASSIS_CTRL_RES19_SIZE 103 + +typedef union{ + struct + { + uint32_t CRYPTODISABLE:1; + uint32_t RESERVED:31; + } B; + uint32_t W; +} CLUSTER_CONFIG_Type; + +typedef union{ + struct + { + uint32_t CFGEND:1; + uint32_t CFGTE:1; + uint32_t VINITHI:1; + uint32_t AA64nAA32:1; + uint32_t RESERVED:28; + } B; + uint32_t W; +} PEn_CONFIG_Type; + +typedef union{ + struct + { + uint32_t RESERVED:2; + uint32_t RVBAR31_2:30; + } B; + uint32_t W; +} PEn_RVBARADDR_LW_Type; + +typedef union{ + struct + { + uint32_t RVBAR43_32:12; + uint32_t RESERVED:20; + } B; + uint32_t W; +} PEn_RVBARADDR_UP_Type; + +typedef union{ + struct + { + uint32_t POR:1; + uint32_t nSRST:1; + uint32_t SDC:1; + uint32_t HOST:1; + uint32_t RESERVED:28; + } B; + uint32_t W; +} HOST_RST_SYN_Type; + +typedef union{ + struct + { + uint32_t BOOT_MSK:4; + uint32_t RESERVED:28; + } B; + uint32_t W; +} HOST_CPU_BOOT_MSK_Type; + +typedef union{ + struct + { + uint32_t PWR_REQ:1; + uint32_t MEM_RET_REQ:1; + uint32_t RESERVED:30; + } B; + uint32_t W; +} HOST_CPU_CLUS_PWR_REQ_Type; + +typedef union{ + struct + { + uint32_t CORE0_WAKEUP:1; + uint32_t CORE1_WAKEUP:1; + uint32_t CORE2_WAKEUP:1; + uint32_t CORE3_WAKEUP:1; + uint32_t RESERVED:28; + } B; + uint32_t W; +} HOST_CPU_WAKEUP_Type; + +typedef union{ + struct + { + uint32_t CPUWAIT:1; + uint32_t RST_REQ:1; + uint32_t RESERVED:30; + } B; + uint32_t W; +} EXT_SYSn_RST_CTRL_Type; + +typedef union{ + struct + { + uint32_t RESERVED0:1; + uint32_t RST_ACK:2; + uint32_t RESERVED1:29; + } B; + uint32_t W; +} EXT_SYSn_RST_ST_Type; + +typedef union{ + struct + { + uint32_t WAKEUP_EN:1; + uint32_t REFCLK_REQ:1; + uint32_t DBGTOP_PWR_REQ:1; + uint32_t SYSTOP_PWR_REQ:3; + uint32_t RESERVED:26; + } B; + uint32_t W; +} CHS_PWR_REQ_Type; + +typedef union{ + struct + { + uint32_t RESERVED0:2; + uint32_t DBGTOP_PWR_ST:1; + uint32_t SYSTOP_PWR_ST:3; + uint32_t RESERVED1:26; + } B; + uint32_t W; +} CHS_PWR_ST_Type; + +typedef union{ + struct + { + uint32_t HOST_FW_LOCK:1; + uint32_t INT_RTR_LOCK:1; + uint32_t HOST_CPU0_LOCK:1; + uint32_t HOST_CPU1_LOCK:1; + uint32_t HOST_CPU2_LOCK:1; + uint32_t HOST_CPU3_LOCK:1; + uint32_t HOST_GIC_LOCK:1; + uint32_t HOST_CHS_LOCK:1; + uint32_t RESERVED:23; + uint32_t WOR:1; + } B; + uint32_t W; +} HOST_SYS_LCTRL_Type; + +typedef union{ + struct + { + uint32_t CLKSELECT:8; + uint32_t CLKSELECT_CUR:8; + uint32_t RESERVED:16; + } B; + uint32_t W; +} HOSTCPUCLK_CTRL_Type; + +typedef union{ + struct + { + uint32_t CLKDIV:5; + uint32_t RESERVED0:11; + uint32_t CLKDIV_CUR:5; + uint32_t RESERVED1:11; + } B; + uint32_t W; +} HOSTCPUCLK_DIV0_Type; + +typedef union{ + 
struct + { + uint32_t CLKDIV:5; + uint32_t RESERVED0:11; + uint32_t CLKDIV_CUR:5; + uint32_t RESERVED1:11; + } B; + uint32_t W; +} HOSTCPUCLK_DIV1_Type; + +typedef union{ + struct + { + uint32_t CLKSELECT:8; + uint32_t CLKSELECT_CUR:8; + uint32_t RESERVED:16; + } B; + uint32_t W; +} GICCLK_CTRL_Type; + +typedef union{ + struct + { + uint32_t CLKDIV:5; + uint32_t RESERVED0:11; + uint32_t CLKDIV_CUR:5; + uint32_t RESERVED1:11; + } B; + uint32_t W; +} GICCLK_DIV0_Type; + +typedef union{ + struct + { + uint32_t CLKSELECT:8; + uint32_t CLKSELECT_CUR:8; + uint32_t RESERVED:8; + uint32_t ENTRY_DELAY:8; + } B; + uint32_t W; +} ACLK_CTRL_Type; + +typedef union{ + struct + { + uint32_t CLKDIV:5; + uint32_t RESERVED0:11; + uint32_t CLKDIV_CUR:5; + uint32_t RESERVED1:11; + } B; + uint32_t W; +} ACLK_DIV0_Type; + +typedef union{ + struct + { + uint32_t CLKSELECT:8; + uint32_t CLKSELECT_CUR:8; + uint32_t RESERVED:8; + uint32_t ENTRY_DELAY:8; + } B; + uint32_t W; +} CTRLCLK_CTRL_Type; + +typedef union{ + struct + { + uint32_t CLKDIV:5; + uint32_t RESERVED0:11; + uint32_t CLKDIV_CUR:5; + uint32_t RESERVED1:11; + } B; + uint32_t W; +} CTRLCLK_DIV0_Type; + +typedef union{ + struct + { + uint32_t CLKSELECT:8; + uint32_t CLKSELECT_CUR:8; + uint32_t RESERVED:8; + uint32_t ENTRY_DELAY:8; + } B; + uint32_t W; +} DBGCLK_CTRL_Type; + +typedef union{ + struct + { + uint32_t CLKDIV:5; + uint32_t RESERVED0:11; + uint32_t CLKDIV_CUR:5; + uint32_t RESERVED1:11; + } B; + uint32_t W; +} DBGCLK_DIV0_Type; + +typedef union{ + struct + { + uint32_t CLKSELECT:8; + uint32_t CLKSELECT_CUR:8; + uint32_t RESERVED:16; + } B; + uint32_t W; +} HOSTUARTCLK_CTRL_Type; + +typedef union{ + struct + { + uint32_t CLKDIV:5; + uint32_t RESERVED0:11; + uint32_t CLKDIV_CUR:5; + uint32_t RESERVED1:11; + } B; + uint32_t W; +} HOSTUARTCLK_DIV0_Type; + +typedef union{ + struct + { + uint32_t CLKSELECT:8; + uint32_t CLKSELECT_CUR:8; + uint32_t RESERVED:8; + uint32_t ENTRY_DELAY:8; + } B; + uint32_t W; +} REFCLK_CTRL_Type; + +typedef union{ + struct + { + uint32_t RESERVED0:1; + uint32_t GICCLK_FORCE_ST:1; + uint32_t ACLK_FORCE_ST:1; + uint32_t CTRLCLK_FORCE_ST:1; + uint32_t DBGCLK_FORCE_ST:1; + uint32_t RESERVED1:27; + } B; + uint32_t W; +} CLKFORCE_ST_Type; + +typedef union{ + struct + { + uint32_t RESERVED0:1; + uint32_t GICCLK_FORCE_SET:1; + uint32_t ACLK_FORCE_SET:1; + uint32_t CTRL_CLK_FORCE_SET:1; + uint32_t DBGCLK_FORCE_SET:1; + uint32_t RESERVED1:27; + } B; + uint32_t W; +} CLKFORCE_SET_Type; + +typedef union{ + struct + { + uint32_t RESERVED0:1; + uint32_t GICCLK_FORCE_CLR:1; + uint32_t ACLK_FORCE_CLR:1; + uint32_t CTRL_CLK_FORCE_CLR:1; + uint32_t DBGCLK_FORCE_CLR:1; + uint32_t RESERVED1:27; + } B; + uint32_t W; +} CLKFORCE_CLR_Type; + +typedef union{ + struct + { + uint32_t SYSPLLLOCK_ST:1; + uint32_t CPUPLLLOCK_ST:1; + uint32_t RESERVED:30; + } B; + uint32_t W; +} PLL_ST_Type; + +typedef union{ + struct + { + uint32_t FW_INT_ST:1; + uint32_t DBGTOP_INT_ST:1; + uint32_t SYSTOP_INT_ST:1; + uint32_t CLUSTOP_INT_ST:1; + uint32_t CORE0_INT_ST:1; + uint32_t CORE1_INT_ST:1; + uint32_t CORE2_INT_ST:1; + uint32_t CORE3_INT_ST:1; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_PPU_INT_ST_Type; + +typedef union{ + struct + { + uint32_t DES_2:4; + uint32_t Size:4; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_PID4_Type; + +typedef union{ + struct + { + uint32_t RESERVED:32; + } B; + uint32_t W; +} HOST_CH_PID5_Type; + +typedef union{ + struct + { + uint32_t RESERVED:32; + } B; + uint32_t W; +} HOST_CH_PID6_Type; + +typedef union{ + 
struct + { + uint32_t RESERVED:32; + } B; + uint32_t W; +} HOST_CH_PID7_Type; + +typedef union{ + struct + { + uint32_t PART_0:8; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_PID0_Type; + +typedef union{ + struct + { + uint32_t PART_1:4; + uint32_t DES_0:4; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_PID1_Type; + +typedef union{ + struct + { + uint32_t DES_1:3; + uint32_t JEDEC:1; + uint32_t REVISION:4; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_PID2_Type; + +typedef union{ + struct + { + uint32_t CMOD:4; + uint32_t REVAD:4; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_PID3_Type; + +typedef union{ + struct + { + uint32_t PRMBL_0:8; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_CID0_Type; + +typedef union{ + struct + { + uint32_t PRMBL_1:4; + uint32_t CLASS:4; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_CID1_Type; + +typedef union{ + struct + { + uint32_t PRMBL_2:8; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_CID2_Type; + +typedef union{ + struct + { + uint32_t PRMBL_3:8; + uint32_t RESERVED:24; + } B; + uint32_t W; +} HOST_CH_CID3_Type; + + +typedef struct{ + __IO CLUSTER_CONFIG_Type CLUSTER_CONFIG; /*!< Offset: 0x000 */ + __I uint32_t RESERVED0[HOST_CHASSIS_CTRL_RES0_SIZE]; /*!< Offset: 0x004-0x00C */ + __IO PEn_CONFIG_Type PE0_CONFIG; /*!< Offset: 0x010 */ + __IO PEn_RVBARADDR_LW_Type PE0_RVBARADDR_LW; /*!< Offset: 0x014 */ + __IO PEn_RVBARADDR_UP_Type PE0_RVBARADDR_UP; /*!< Offset: 0x018 */ + __IO uint32_t RESERVED1[HOST_CHASSIS_CTRL_RES1_SIZE]; /*!< Offset: 0x01C */ + __IO PEn_CONFIG_Type PE1_CONFIG; /*!< Offset: 0x020 */ + __IO PEn_RVBARADDR_LW_Type PE1_RVBARADDR_LW; /*!< Offset: 0x024 */ + __IO PEn_RVBARADDR_UP_Type PE1_RVBARADDR_UP; /*!< Offset: 0x028 */ + __IO uint32_t RESERVED2[HOST_CHASSIS_CTRL_RES2_SIZE]; /*!< Offset: 0x02C */ + __IO PEn_CONFIG_Type PE2_CONFIG; /*!< Offset: 0x030 */ + __IO PEn_RVBARADDR_LW_Type PE2_RVBARADDR_LW; /*!< Offset: 0x034 */ + __IO PEn_RVBARADDR_UP_Type PE2_RVBARADDR_UP; /*!< Offset: 0x038 */ + __IO uint32_t RESERVED3[HOST_CHASSIS_CTRL_RES3_SIZE]; /*!< Offset: 0x03C */ + __IO PEn_CONFIG_Type PE3_CONFIG; /*!< Offset: 0x040 */ + __IO PEn_RVBARADDR_LW_Type PE3_RVBARADDR_LW; /*!< Offset: 0x044 */ + __IO PEn_RVBARADDR_UP_Type PE3_RVBARADDR_UP; /*!< Offset: 0x048 */ + __IO uint32_t RESERVED4[HOST_CHASSIS_CTRL_RES4_SIZE]; /*!< Offset: 0x04C-0x1FC */ + __I HOST_RST_SYN_Type HOST_RST_SYN; /*!< Offset: 0x200 */ + __IO uint32_t RESERVED5[HOST_CHASSIS_CTRL_RES5_SIZE]; /*!< Offset: 0x204-0x2FC */ + __IO HOST_CPU_BOOT_MSK_Type HOST_CPU_BOOT_MSK_Type; /*!< Offset: 0x300 */ + __IO HOST_CPU_CLUS_PWR_REQ_Type HOST_CPU_CLUS_PWR_REQ; /*!< Offset: 0x304 */ + __O HOST_CPU_WAKEUP_Type HOST_CPU_WAKEUP; /*!< Offset: 0x308 */ + __IO uint32_t RESERVED6[HOST_CHASSIS_CTRL_RES1_SIZE]; /*!< Offset: 0x30C */ + __IO EXT_SYSn_RST_CTRL_Type EXT_SYS0_RST_CTRL; /*!< Offset: 0x310 */ + __IO EXT_SYSn_RST_ST_Type EXT_SYS0_RST_ST; /*!< Offset: 0x314 */ + __IO EXT_SYSn_RST_CTRL_Type EXT_SYS1_RST_CTRL; /*!< Offset: 0x318 */ + __IO EXT_SYSn_RST_ST_Type EXT_SYS1_RST_ST; /*!< Offset: 0x31C */ + __IO EXT_SYSn_RST_CTRL_Type EXT_SYS2_RST_CTRL; /*!< Offset: 0x320 */ + __IO EXT_SYSn_RST_ST_Type EXT_SYS2_RST_ST; /*!< Offset: 0x324 */ + __IO EXT_SYSn_RST_CTRL_Type EXT_SYS3_RST_CTRL; /*!< Offset: 0x328 */ + __IO EXT_SYSn_RST_ST_Type EXT_SYS3_RST_ST; /*!< Offset: 0x32C */ + __IO uint32_t RESERVED7[HOST_CHASSIS_CTRL_RES7_SIZE]; /*!< Offset: 0x330-0x3FC */ + __IO CHS_PWR_REQ_Type CHS_PWR_REQ; /*!< Offset: 0x400 */ + __I 
CHS_PWR_ST_Type CHS_PWR_ST; /*!< Offset: 0x404 */ + __IO uint32_t RESERVED8[HOST_CHASSIS_CTRL_RES8_SIZE]; /*!< Offset: 0x408-0x4FC */ + __I HOST_SYS_LCTRL_Type HOST_SYS_LCTRL_ST; /*!< Offset: 0x500 */ + __O HOST_SYS_LCTRL_Type HOST_SYS_LCTRL_SET; /*!< Offset: 0x504 */ + __O HOST_SYS_LCTRL_Type HOST_SYS_LCTRL_CLR; /*!< Offset: 0x508 */ + __IO uint32_t RESERVED9[HOST_CHASSIS_CTRL_RES9_SIZE]; /*!< Offset: 0x504-0x7FC */ + __IO HOSTCPUCLK_CTRL_Type HOSTCPUCLK_CTRL; /*!< Offset: 0x800 */ + __IO HOSTCPUCLK_DIV0_Type HOSTCPUCLK_DIV0; /*!< Offset: 0x804 */ + __IO HOSTCPUCLK_DIV1_Type HOSTCPUCLK_DIV1; /*!< Offset: 0x808 */ + __IO uint32_t RESERVED10[HOST_CHASSIS_CTRL_RES10_SIZE]; /*!< Offset: 0x80C */ + __IO GICCLK_CTRL_Type GICCLK_CTRL; /*!< Offset: 0x810 */ + __IO GICCLK_DIV0_Type GICCLK_DIV0; /*!< Offset: 0x814 */ + __IO uint32_t RESERVED11[HOST_CHASSIS_CTRL_RES11_SIZE]; /*!< Offset: 0x818-0x81C */ + __IO ACLK_CTRL_Type ACLK_CTRL; /*!< Offset: 0x820 */ + __IO ACLK_DIV0_Type ACLK_DIV0; /*!< Offset: 0x824 */ + __IO uint32_t RESERVED12[HOST_CHASSIS_CTRL_RES12_SIZE]; /*!< Offset: 0x828-0x82C */ + __IO CTRLCLK_CTRL_Type CTRLCLK_CTRL; /*!< Offset: 0x830 */ + __IO CTRLCLK_DIV0_Type CTRLCLK_DIV0; /*!< Offset: 0x834 */ + __IO uint32_t RESERVED13[HOST_CHASSIS_CTRL_RES13_SIZE]; /*!< Offset: 0x838-0x83C */ + __IO DBGCLK_CTRL_Type DBGCLK_CTRL; /*!< Offset: 0x840 */ + __IO DBGCLK_DIV0_Type DBGCLK_DIV0; /*!< Offset: 0x844 */ + __IO uint32_t RESERVED17[HOST_CHASSIS_CTRL_RES13_SIZE]; /*!< Offset: 0x848-0x84C */ + __IO HOSTUARTCLK_CTRL_Type HOSTUARTCLK_CTRL; /*!< Offset: 0x850 */ + __IO HOSTUARTCLK_DIV0_Type HOSTUARTCLK_DIV0; /*!< Offset: 0x854 */ + __IO uint32_t RESERVED14[HOST_CHASSIS_CTRL_RES14_SIZE]; /*!< Offset: 0x858-0x85C */ + __IO REFCLK_CTRL_Type REFCLK_CTRL; /*!< Offset: 0x860 */ + __IO uint32_t RESERVED19[HOST_CHASSIS_CTRL_RES19_SIZE]; /*!< Offset: 0x864-0x9FC */ + __I CLKFORCE_ST_Type CLKFORCE_ST; /*!< Offset: 0xA00 */ + __O CLKFORCE_SET_Type CLKFORCE_SET; /*!< Offset: 0xA04 */ + __O CLKFORCE_CLR_Type CLKFORCE_CLR; /*!< Offset: 0xA08 */ + __IO uint32_t RESERVED18[HOST_CHASSIS_CTRL_RES1_SIZE]; /*!< Offset: 0xA0C */ + __I PLL_ST_Type PLL_ST; /*!< Offset: 0xA10 */ + __IO uint32_t RESERVED15[HOST_CHASSIS_CTRL_RES15_SIZE]; /*!< Offset: 0xA14-0xAFC */ + __I HOST_PPU_INT_ST_Type HOST_PPU_INT_ST; /*!< Offset: 0xB00 */ + __IO uint32_t RESERVED16[HOST_CHASSIS_CTRL_RES16_SIZE]; /*!< Offset: 0xB04-0xFCC */ + __IO HOST_CH_PID4_Type PID4; /*!< Offset: 0xFD0 */ + __IO HOST_CH_PID5_Type PID5; /*!< Offset: 0xFD4 */ + __IO HOST_CH_PID6_Type PID6; /*!< Offset: 0xFD8 */ + __IO HOST_CH_PID7_Type PID7; /*!< Offset: 0xFDC */ + __IO HOST_CH_PID0_Type PID0; /*!< Offset: 0xFE0 */ + __IO HOST_CH_PID1_Type PID1; /*!< Offset: 0xFE4 */ + __IO HOST_CH_PID2_Type PID2; /*!< Offset: 0xFE8 */ + __IO HOST_CH_PID3_Type PID3; /*!< Offset: 0xFEC */ + __IO HOST_CH_CID0_Type CID0; /*!< Offset: 0xFF0 */ + __IO HOST_CH_CID1_Type CID1; /*!< Offset: 0xFF4 */ + __IO HOST_CH_CID2_Type CID2; /*!< Offset: 0xFF8 */ + __IO HOST_CH_CID3_Type CID3; /*!< Offset: 0xFFC */ + + +} HOST_CHASSIS_CTRL_TypeDef; + + +#endif /* HOST_CHASSIS_CTRL_DEF_H */ diff --git a/software/lib/sw_lib/common/include/system_level_functions.h b/software/lib/sw_lib/common/include/system_level_functions.h new file mode 100755 index 0000000000000000000000000000000000000000..c4d568edbf19c4568f17a6f802ea5c635b207c5d --- /dev/null +++ b/software/lib/sw_lib/common/include/system_level_functions.h @@ -0,0 +1,140 @@ 
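/*
 * Usage sketch for the register layout defined above: each union exposes the
 * register both as a whole word (.W) and as named bit-fields (.B), so code
 * can program a core's reset vector and wake it without hand-built masks.
 * The base address used here is a placeholder; the real one is expected to
 * come from sys_memory_map.h.
 */
#include <stdint.h>

#define HOST_CHASSIS_CTRL_DEMO_BASE 0x5A010000UL   /* placeholder base address */
#define HOST_CHASSIS_CTRL \
        ((HOST_CHASSIS_CTRL_TypeDef *) HOST_CHASSIS_CTRL_DEMO_BASE)

static void release_core0(uint64_t reset_vector)
{
    HOST_CHASSIS_CTRL->PE0_RVBARADDR_LW.B.RVBAR31_2  = (uint32_t)(reset_vector >> 2);  /* bits [31:2]  */
    HOST_CHASSIS_CTRL->PE0_RVBARADDR_UP.B.RVBAR43_32 = (uint32_t)(reset_vector >> 32); /* bits [43:32] */
    HOST_CHASSIS_CTRL->HOST_CPU_WAKEUP.W             = 0x1;                            /* CORE0_WAKEUP */
}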
+//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#ifndef __SYSTEM_LEVEL_FUNCTIONS_H__ +#define __SYSTEM_LEVEL_FUNCTIONS_H__ + +// Every "register" should have a separate memory line +// 64 bit aligned wait offsets for Secure Enclave +#define CPUSYNC_SECENC_SE_OFFSET 0x0 +#define CPUSYNC_SECENC_ES0_OFFSET 0x8 +#define CPUSYNC_SECENC_ES1_OFFSET 0x10 +#define CPUSYNC_SECENC_HS0_OFFSET 0x18 +// 64 bit aligned wait offsets for Secure Enclave +#define CPUSYNC_EXTSYS0_SE_OFFSET 0x20 +#define CPUSYNC_EXTSYS0_ES0_OFFSET 0x28 +#define CPUSYNC_EXTSYS0_ES1_OFFSET 0x30 +#define CPUSYNC_EXTSYS0_HS0_OFFSET 0x38 +// 64 bit aligned wait offsets for Secure Enclave +#define CPUSYNC_EXTSYS1_SE_OFFSET 0x40 +#define CPUSYNC_EXTSYS1_ES0_OFFSET 0x48 +#define CPUSYNC_EXTSYS1_ES1_OFFSET 0x50 +#define CPUSYNC_EXTSYS1_HS0_OFFSET 0x58 +// 64 bit aligned wait offsets for Secure Enclave +#define CPUSYNC_HOST0_SE_OFFSET 0x60 +#define CPUSYNC_HOST0_ES0_OFFSET 0x68 +#define CPUSYNC_HOST0_ES1_OFFSET 0x70 +#define CPUSYNC_HOST0_HS0_OFFSET 0x78 + +//Include for Common memory tester functions +#include <stdio.h> +#include <inttypes.h> +#include "stdlib.h" + +#include "system.h" +// CPU_CM3 should be defined in the makfile by adding -DSYNC_CPU_CM3_<0,1>, -DSYNC_CPU_CM0, -DSYNC_CPU_HOST to the C flags +#if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) || defined(SYNC_CPU_CM0) + #include "mem_map.h" +#endif +#if defined(SYNC_CPU_HOST) + #include "sys_memory_map.h" +#endif + +///////////////////////////////////// +// CPU synchronising functions +///////////////////////////////////// + +// Each CPU has its own set of "registers" for each other masters that can wait for it CPUSYNC_*_OFFSET +// Each line has the following structure: +// Self "registers" determines that the functionality activated or not: +// CPUSYNC_SECENC_SE_OFFSET = 0x1 -> Secure Enclave CPUSync functionality is ACTIVATED +// CPUSYNC_SECENC_SE_OFFSET = 0x0 -> Secure Enclave CPUSync functionality is DEACTIVATED +// Other "registers" can tell that another master is waiting for the current CPU: +// CPUSYNC_SECENC_ES0_OFFSET = 0x1 -> External System 0 is waiting for Secure Enclave +// CPUSYNC_SECENC_ES0_OFFSET = 0x0 -> External System 0 is NOT waiting for Secure Enclave + +// Init function to use by the CM0 when after reset +#if defined(SYNC_CPU_CM0) + void CPUSync_Init(); +#endif + +// Wait functions can be called by each CPU to wait for another +// A CPU can not call a wait function to itself (does not exists because of the defines) +// Each CPU registers itself as "currently waiting" at the target CPU's proper "register" +// When the wait is finished the "currently waiting" status is cleared by the CPU it was waiting for +// by calling CPUSync_Done() +#if !defined(SYNC_CPU_CM0) + void CPUSync_WaitForSecEnc(); +#endif +#if 
!defined(SYNC_CPU_CM3_0) + void CPUSync_WaitForExtSys0(); +#endif +#if !defined(SYNC_CPU_CM3_1) + void CPUSync_WaitForExtSys1(); +#endif +#if !defined(SYNC_CPU_HOST) + void CPUSync_WaitForHost0(); +#endif + +// Use this on the CPU which is working to indicate job done +// If the working CPU has finished then it clears all its status "register" to let other waiting CPUs continue +void CPUSync_Done(); + +// Activate functionality +// Activate syncing functionality, by default it is active +void CPUSync_Activate(); +// DeActivate syncing functionality, by default it is active +void CPUSync_DeActivate(); + +// Status functions +// Get the currently waiting CPUs vector: +// [0] - SECENC is '1' wating or '0' not +// [1] - EXTSYS0 is '1' wating or '0' not +// [2] - EXTSYS1 is '1' wating or '0' not +// [3] - HOST0 is '1' wating or '0' not +// Self bits are always '0'. It is impossible for a CPU to wait for itself +int get_CPUSync_SecEncWaitList(); +int get_CPUSync_ExtSys0WaitList(); +int get_CPUSync_ExtSys1WaitList(); +int get_CPUSync_Host0WaitList(); + +///////////////////////////////////// +// Common memory tester functions +///////////////////////////////////// +#ifndef HW_REG_BYTE +#define HW_REG_BYTE(base,offset) (*(volatile uint8_t *)(uintptr_t)((base) + (offset))) +#endif +#ifndef HW_REG_WORD +#define HW_REG_WORD(base,offset) (*(volatile uint32_t *)(uintptr_t)((base) + (offset))) +#endif +#ifndef HW_REG_HALF +#define HW_REG_HALF(base,offset) (*(volatile unsigned short int *)(uintptr_t)((base) + (offset))) +#endif +uint32_t DataBusWalking(uint32_t BaseAddr, uint32_t Offset); +uint32_t AddressWalking(uint32_t BaseAddr, uint32_t TopAddr,uint32_t StartPos, uint32_t* RbAddr); +uint32_t AddressWalkingP(uint32_t BaseAddr, uint32_t TopAddr, uint32_t BackupAddr, uint32_t Preserve, uint32_t StartPos, uint32_t* RbAddr); +uint32_t UnalignedAccess(uint32_t BaseAddr, uint32_t TopAddr, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr); +uint32_t UnalignedAccessP(uint32_t BaseAddr, uint32_t TopAddr, uint32_t BackupAddr, uint32_t Preserve, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr); +uint32_t WordHalfWordAccess(uint32_t BaseAddr, uint32_t TopAddr, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr); +uint32_t WordHalfWordAccessP(uint32_t BaseAddr, uint32_t TopAddr, uint32_t BackupAddr, uint32_t Preserve, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr); + + +#endif //__SYSTEM_LEVEL_FUNCTIONS_H__ + + + + diff --git a/software/lib/sw_lib/common/src/system_level_functions.c b/software/lib/sw_lib/common/src/system_level_functions.c new file mode 100755 index 0000000000000000000000000000000000000000..9b97412f80bd21eaccd6e43cbb453d3e4c37fe6e --- /dev/null +++ b/software/lib/sw_lib/common/src/system_level_functions.c @@ -0,0 +1,878 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
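/*
 * Sketch of the hand-shake described above, assuming the host image is built
 * with -DSYNC_CPU_HOST and the external system 0 image with -DSYNC_CPU_CM3_0,
 * as the header's comments require. Each side spins on a shared-memory
 * "register" at CPUSYNC_BASE rather than on an interrupt.
 */
#include "system_level_functions.h"

#if defined(SYNC_CPU_HOST)
/* Host side: block until external system 0 has finished. */
void host_wait_for_extsys0_example(void)
{
    CPUSync_WaitForExtSys0();   /* registers the host as waiting, then polls  */
    /* ... results produced by external system 0 are now safe to consume ...  */
}
#endif

#if defined(SYNC_CPU_CM3_0)
/* External system 0 side: release every core waiting on it. */
void extsys0_signal_done_example(void)
{
    /* ... produce results ... */
    CPUSync_Done();   /* clears the wait flags of all CPUs waiting for ExtSys0 */
}
#endif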
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#ifndef __SYSTEM_LEVEL_FUNCTIONS_C__ +#define __SYSTEM_LEVEL_FUNCTIONS_C__ + +#include "system_level_functions.h" + +#if defined(SYNC_CPU_CM0) + void CPUSync_Init() { + for(int i=0x0; i < 0x80; i+=0x4){ + MEM_RW(CPUSYNC_BASE,i) = 0x0; + } + } +#endif + +#if !defined(SYNC_CPU_CM0) + void CPUSync_WaitForSecEnc() { + // wait only if the deactive bit is not set + if((MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_SE_OFFSET) & 0x1) == 0x0) { + // first register the current waiting CPU in its proper location + #if defined(SYNC_CPU_CM0) + // Not allowed to call for itself + #elif defined(SYNC_CPU_CM3_0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES0_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES0_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_SE_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined(SYNC_CPU_CM3_1) + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES1_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES1_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_SE_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined (SYNC_CPU_HOST) + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_HS0_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_HS0_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_SE_OFFSET) & 0x1) != 0x1) + ) {} + #endif + } + } +#endif + +#if !defined(SYNC_CPU_CM3_0) + void CPUSync_WaitForExtSys0() { + // wait only if the deactive bit is not set + if((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES0_OFFSET) & 0x1) == 0x0) { + // first register the current waiting CPU in its proper location + #if defined(SYNC_CPU_CM0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_SE_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_SE_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES0_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined(SYNC_CPU_CM3_0) + // Not allowed to call for itself + #elif defined(SYNC_CPU_CM3_1) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES1_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES1_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES0_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined (SYNC_CPU_HOST) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_HS0_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_HS0_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES0_OFFSET) & 0x1) != 0x1) + ){} + #endif + } + } +#endif + +#if !defined(SYNC_CPU_CM3_1) + void CPUSync_WaitForExtSys1() { + // wait only if the deactive bit is not set + if((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES1_OFFSET) & 0x1) == 0x0) { + // first register the current waiting CPU in its proper location + #if defined(SYNC_CPU_CM0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_SE_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_SE_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES1_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined(SYNC_CPU_CM3_0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES0_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES0_OFFSET) & 0x1) != 0x0) && + 
((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES1_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined(SYNC_CPU_CM3_1) + // Not allowed to call for itself + #elif defined (SYNC_CPU_HOST) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_HS0_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_HS0_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES1_OFFSET) & 0x1) != 0x1) + ) {} + #endif + } + } +#endif + +#if !defined(SYNC_CPU_HOST) + void CPUSync_WaitForHost0() { + // wait only if the deactive bit is not set + if((MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_HS0_OFFSET) & 0x1) == 0x0) { + // first register the current waiting CPU in its proper location + #if defined(SYNC_CPU_CM0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_SE_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_SE_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_HS0_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined(SYNC_CPU_CM3_0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES0_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES0_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_HS0_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined(SYNC_CPU_CM3_1) + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES1_OFFSET) = 0x1; + // wait until it is finished or deactivated + while ( + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES1_OFFSET) & 0x1) != 0x0) && + ((MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_HS0_OFFSET) & 0x1) != 0x1) + ) {} + #elif defined (SYNC_CPU_HOST) + // Not allowed to call for itself + #endif + } + } +#endif + +// Clear all pending wait "register" for the actual CPU +void CPUSync_Done() { +#if defined(SYNC_CPU_CM0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES0_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES1_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_HS0_OFFSET) = 0x0; +#elif defined(SYNC_CPU_CM3_0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_SE_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES1_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_HS0_OFFSET) = 0x0; +#elif defined(SYNC_CPU_CM3_1) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_SE_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES0_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_HS0_OFFSET) = 0x0; +#elif defined(SYNC_CPU_HOST) + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_SE_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES0_OFFSET) = 0x0; + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES1_OFFSET) = 0x0; +#else + // undefined CPU +#endif +} + +// To deactivate write 0x1 to the self "register" +void CPUSync_DeActivate() { +#if defined(SYNC_CPU_CM0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_SE_OFFSET) = 0x1; +#elif defined(SYNC_CPU_CM3_0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES0_OFFSET) = 0x1; +#elif defined(SYNC_CPU_CM3_1) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES1_OFFSET) = 0x1; +#elif defined(SYNC_CPU_HOST) + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_HS0_OFFSET) = 0x1; +#else + // undefined CPU +#endif +} + +// To activate write 0x0 to the self "register" (by default it is activated) +void CPUSync_Activate() { +#if defined(SYNC_CPU_CM0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_SE_OFFSET) = 0x0; +#elif defined(SYNC_CPU_CM3_0) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES0_OFFSET) = 0x0; +#elif defined(SYNC_CPU_CM3_1) + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES1_OFFSET) = 0x0; +#elif defined(SYNC_CPU_HOST) + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_HS0_OFFSET) = 0x0; +#else + // undefined CPU +#endif +} + +// Get the currently waiting CPUs vector: +// [0] - 
SECENC is '1' wating or '0' not +// [1] - EXTSYS0 is '1' wating or '0' not +// [2] - EXTSYS1 is '1' wating or '0' not +// [3] - HOST0 is '1' wating or '0' not +// Self bits are always '0'. It is impossible for a CPU to wait for itself +int get_CPUSync_SecEncWaitList() { + return ( + (MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES0_OFFSET) << 1) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_ES1_OFFSET) << 2) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_SECENC_HS0_OFFSET) << 3) + ); +} +int get_CPUSync_ExtSys0WaitList() { + return ( + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_SE_OFFSET) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_ES1_OFFSET) << 2) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS0_HS0_OFFSET) << 3) + ); +} +int get_CPUSync_ExtSys1WaitList() { + return ( + MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_SE_OFFSET) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_ES0_OFFSET) << 1) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_EXTSYS1_HS0_OFFSET) << 3) + ); +} +int get_CPUSync_Host0WaitList() { + return ( + MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_SE_OFFSET) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES0_OFFSET) << 1) | + (MEM_RW(CPUSYNC_BASE,CPUSYNC_HOST0_ES1_OFFSET) << 2) + ); +} + +// ----------------------------------------------------------------------------- +// Common memory tester functions ********************************************** +// ----------------------------------------------------------------------------- + + +//-------------------------------------------------------------------------- +// Data Bus Walking, write same address with walking 1's data +//-------------------------------------------------------------------------- +uint32_t DataBusWalking(uint32_t BaseAddr, uint32_t Offset) { + uint32_t pattern = 0x0; + uint32_t rd_word = 0x0; + uint32_t ErrCnt = 0; + int i = 0; + + #ifdef TEST_VERBOSITY_HIGH + c_print("DataBusWalking at Addr=0x%08x\n", BaseAddr + Offset); + #endif + + for (i=0; i<32; i++) { + pattern = (1<<i); + HW_REG_WORD(BaseAddr, Offset) = pattern; + //c_print("wr(addr=0x%08x)=0x%08x (1 << %2d) \n", BaseAddr + Offset, pattern, i); + rd_word = HW_REG_WORD(BaseAddr, Offset); + if (rd_word != pattern) { + c_print("ERROR: read value not equal write pattern at Addr 0x%08x\n", BaseAddr + Offset); + ErrCnt++; + } + } + //reset memory content + HW_REG_WORD(BaseAddr, Offset) = 0x0; + + return ErrCnt; +} + + +// ------------------------------------------------- +// Function for walking ones across address bus (also checks start and end address) +// All writes followed by all reads +// ------------------------------------------------- +uint32_t AddressWalking(uint32_t BaseAddr, uint32_t TopAddr, uint32_t StartPos, uint32_t* RbAddr) { + uint32_t ErrCnt = 0; + uint32_t i = 0; + uint8_t Data_8 = 0; + uint32_t Data_32 = 0; + uint32_t Actual, addr, addr_actual; + uint32_t SEED; + + // Generate random seed with SV $urand function: + +#if (defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1)) && !defined(NO_TBOX) + TRICKBOX_IL->RND_MAX = 0xFFFF; + SEED = TRICKBOX_IL->RND_VAL; + srand(SEED); +#endif + + // Write data to memory. Walk 1 across address bus + c_print("Walking 1s on addr bus. 
Range 0x%08x - 0x%08x\n",BaseAddr,TopAddr); + + //check start address + c_print("Check start address.\n"); + Data_8 = (rand()%0x00FF); + //c_print("Wr 0x%08x to 0x%08x \n", Data_8, (BaseAddr)); + addr = (BaseAddr); + HW_REG_BYTE(addr, 0) = Data_8; + //c_print("Rd 0x%08x from 0x%08x \n",Data_8, (BaseAddr)); + Actual = HW_REG_BYTE(addr, 0); + if(Actual != Data_8) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr), Data_8, Actual); + ErrCnt++; + } +#if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } +#endif + + c_print("Moving 1s.\n"); + for(i=(1<<StartPos); ((BaseAddr|i)+4)<TopAddr; i=(i<<1)) { + + + if(0x4>i) + { + Data_8 = (rand()%0x00FF); + //c_print("Wr 0x%08x to 0x%08x \n", Data_8, (BaseAddr|i)); + addr = (BaseAddr|i); + HW_REG_BYTE(addr, 0) = Data_8; + //c_print("Rd 0x%08x from 0x%08x \n",Data_8, (BaseAddr|i)); + Actual = HW_REG_BYTE(addr, 0); + if(Actual != Data_8) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr|i), Data_8, Actual); + ErrCnt++; + } + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + } + else { + Data_32 = (rand()%0xFFFF)|(rand()%0xFFFF)<<16; + //c_print("Wr 0x%08x to 0x%08x \n", Data_32, (BaseAddr|i)); + addr = (BaseAddr|i); + HW_REG_WORD(addr, 0) = Data_32; + //c_print("Rd 0x%08x from 0x%08x \n",Data_32, (BaseAddr|i)); + Actual = HW_REG_WORD(addr, 0); + if(Actual != Data_32) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr|i), Data_32, Actual); + ErrCnt++; + } + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + } + } + + //check end address + c_print("Check end address.\n"); + Data_8 = (rand()%0x00FF); + //c_print("Wr 0x%08x to 0x%08x \n", Data_8, (TopAddr)); + addr = (TopAddr); + HW_REG_BYTE(addr, 0) = Data_8; + //c_print("Rd 0x%08x from 0x%08x \n",Data_8, (TopAddr)); + Actual = HW_REG_BYTE(addr, 0); + if(Actual != Data_8) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(TopAddr), Data_8, Actual); + ErrCnt++; + } + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + return(ErrCnt); +} + +// ------------------------------------------------- +// Function for walking ones across address bus (protected) +// All writes followed by all reads +// ------------------------------------------------- +uint32_t AddressWalkingP(uint32_t BaseAddr, uint32_t TopAddr, uint32_t BackupAddr, uint32_t Preserve, uint32_t StartPos, uint32_t* RbAddr) { + uint32_t ErrCnt = 0; + uint32_t i = 0, k = 0; + uint8_t Data_8 = 0; + uint32_t Data_32 = 0; + uint32_t Actual, addr, addr_actual; + uint32_t SEED; + + // Write data to memory. Walk 1 across address bus + c_print("Walking 1s on addr bus. 
Range 0x%08x - 0x%08x.\n",BaseAddr,TopAddr); + c_print("Backup the targeted mem range\n"); + + + // Backup the targeted mem range + if (Preserve == 1) { + for(i=1, k = 0; (BaseAddr|i)<TopAddr; i=(i<<1), k++) { + HW_REG_BYTE(BackupAddr, k) = HW_REG_BYTE((BaseAddr|i), 0); + } + } + + // Generate random seed with SV $urand function: + +#if (defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1)) && !defined(NO_TBOX) + TRICKBOX_IL->RND_MAX = 0xFFFF; + SEED = TRICKBOX_IL->RND_VAL; + srand(SEED); +#endif + + // Write data to memory. Walk 1 across address bus + c_print("Walking 1s on addr bus. Range 0x%08x - 0x%08x\n",BaseAddr,TopAddr); + + //check start address + c_print("Check start address.\n"); + Data_8 = (rand()%0x00FF); + //c_print("Wr 0x%08x to 0x%08x \n", Data_8, (BaseAddr)); + addr = (BaseAddr); + HW_REG_BYTE(addr, 0) = Data_8; + //c_print("Rd 0x%08x from 0x%08x \n",Data_8, (BaseAddr)); + Actual = HW_REG_BYTE(addr, 0); + if(Actual != Data_8) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr), Data_8, Actual); + ErrCnt++; + } + + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + + c_print("Moving 1s.\n"); + for(i=(1<<StartPos); ((BaseAddr|i)+4)<TopAddr; i=(i<<1)) { + + if(0x4>i) //byte access + { + Data_8 = (rand()%0x00FF); + //c_print("Wr 0x%08x to 0x%08x \n", Data_8, (BaseAddr|i)); + addr = (BaseAddr|i); + HW_REG_BYTE(addr, 0) = Data_8; + //c_print("Rd 0x%08x from 0x%08x \n",Data_8, (BaseAddr|i)); + Actual = HW_REG_BYTE(addr, 0); + if(Actual != Data_8) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr|i), Data_8, Actual); + ErrCnt++; + } + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + + + + + + + } + #endif + } + else + { //word access + Data_32 = (rand()%0xFFFF)|(rand()%0xFFFF)<<16; + //c_print("Wr 0x%08x to 0x%08x \n", Data_32, (BaseAddr|i)); + addr = (BaseAddr|i); + HW_REG_WORD(addr, 0) = Data_32; + //c_print("Rd 0x%08x from 0x%08x \n",Data_32, (BaseAddr|i)); + Actual = HW_REG_WORD(addr, 0); + if(Actual != Data_32) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr|i), Data_32, Actual); + ErrCnt++; + } + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + } + } + + //check end address + c_print("Check end address.\n"); + Data_8 = (rand()%0x00FF); + //c_print("Wr 0x%08x to 0x%08x \n", Data_8, (TopAddr)); + addr = (TopAddr); + HW_REG_BYTE(addr, 0) = Data_8; + //c_print("Rd 0x%08x from 0x%08x \n",Data_8, (TopAddr)); + Actual = HW_REG_BYTE(addr, 0); + if(Actual != Data_8) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(TopAddr), Data_8, Actual); + ErrCnt++; + } + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + 
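/*
 * Typical invocation of the memory testers declared in
 * system_level_functions.h, in the same self-checking spirit as the code
 * above. The address range is a placeholder, and the RbAddr argument may be
 * NULL when no address read-back register is available (the functions check
 * for NULL before dereferencing it).
 */
#include "system_level_functions.h"

uint32_t run_sram_tests_example(uint32_t base, uint32_t top)
{
    uint32_t errors = 0;

    errors += DataBusWalking(base, 0x0);                      /* walking 1s on the data bus    */
    errors += AddressWalking(base, top, 2, NULL);             /* walking 1s on the address bus */
    errors += WordHalfWordAccess(base, top, 0x100, 4, NULL);  /* mixed word/half-word accesses */
    errors += UnalignedAccess(base, top, 0x100, 1, NULL);     /* unaligned word accesses       */

    return errors;   /* 0 means every read matched its write */
}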
ErrCnt++; + } + } + #endif + + + // Restore the targeted mem range + if (Preserve == 1) { + for(i=1, k=0; (BaseAddr|i)<TopAddr; i=(i<<1), k++) { + HW_REG_BYTE((BaseAddr|i), 0) = HW_REG_BYTE(BackupAddr, k); + } + } + + return(ErrCnt); +} + +// ------------------------------------------------- +// Word and half word accesses +// All writes followed by all reads +// ------------------------------------------------- +uint32_t WordHalfWordAccess(uint32_t BaseAddr, uint32_t TopAddr, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr) { + uint32_t ErrCnt = 0; + uint32_t i = 0; + uint16_t Data_16_0 = 0, Data_16_1 = 0; + uint32_t Data_32 = 0; + uint32_t Actual_32; + uint16_t Actual_16_0, Actual_16_1; + uint32_t Temp_data, addr_actual, addr; + uint32_t SEED; + + if ((TopAddr-BaseAddr) < MaxStep ) MaxStep = (TopAddr-BaseAddr)%MaxStep; + + // Generate random seed with SV $urand function: + +#if (defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1)) && !defined(NO_TBOX) + TRICKBOX_IL->RND_MAX = 0xFFFF; + SEED = TRICKBOX_IL->RND_VAL; + srand(SEED); +#endif + + c_print("Write/readback Words and Halfwords. Range 0x%08x - 0x%08x\n",BaseAddr,TopAddr); + i = (rand()<<2)%MaxStep; + i = i - (i%4); //aligned + while((i+8)<(TopAddr-BaseAddr)) { + Data_16_0 = (rand()%0xFFFF); + Data_16_1 = (rand()%0xFFFF); + Data_32 = (rand()%0xFFFF)|(rand()%0xFFFF)<<16; + + HW_REG_HALF(BaseAddr, i) = Data_16_0; //HALFWORD write + addr = BaseAddr + i; + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + HW_REG_HALF(BaseAddr, i+2) = Data_16_1; //HALFWORD write + addr = BaseAddr + i +2; + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + HW_REG_WORD(BaseAddr, i+4) = Data_32; //WORD write + addr = BaseAddr + i +4; + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + Actual_32 = HW_REG_WORD(BaseAddr, i); //WORD read + Temp_data = (Data_16_1<<16)|Data_16_0; + if(Actual_32 != Temp_data) { + c_print("ERROR word rd: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr+i), Temp_data, Actual_32); + ErrCnt++; + } + + Actual_16_0 = HW_REG_HALF(BaseAddr, i+4); //HALFWORD read + Actual_16_1 = HW_REG_HALF(BaseAddr, i+6); //HALFWORD read + Temp_data = (Actual_16_1<<16)|Actual_16_0; + if(Data_32 != Temp_data) { + c_print("ERROR half rd: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr+i+4), Data_32, Temp_data); + ErrCnt++; + } + + i += (((rand()%0xFFFF)|(rand()%0xFFFF)<<16)%MaxStep)+MinOffset; + i = i - (i%4); //aligned + + } + + return(ErrCnt); +} + +uint32_t WordHalfWordAccessP(uint32_t BaseAddr, uint32_t TopAddr, uint32_t BackupAddr, uint32_t Preserve, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr){ + uint32_t ErrCnt = 0; + uint32_t i = 0; + uint16_t Data_16_0 = 0, Data_16_1 = 0; + uint32_t Data_32 = 0; + uint32_t Actual_32; + uint16_t Actual_16_0, Actual_16_1; + uint32_t Temp_data, addr_actual, addr; + uint32_t SEED; + + if 
((TopAddr-BaseAddr) < MaxStep ) MaxStep = (TopAddr-BaseAddr)%MaxStep; + + // Generate random seed with SV $urand function: + +#if (defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1)) && !defined(NO_TBOX) + TRICKBOX_IL->RND_MAX = 0xFFFF; + SEED = TRICKBOX_IL->RND_VAL; + srand(SEED); +#endif + + c_print("Write/readback Words and Halfwords. Range 0x%08x - 0x%08x\n",BaseAddr,TopAddr); + i = (rand()<<2)%MaxStep; + i = i - (i%4); //aligned + while((i+8)<(TopAddr-BaseAddr)) { + Data_16_0 = (rand()%0xFFFF); + Data_16_1 = (rand()%0xFFFF); + Data_32 = (rand()%0xFFFF)|(rand()%0xFFFF)<<16; + if(Preserve) + {// Backup the targeted mem range + HW_REG_WORD(BackupAddr, 0) = HW_REG_WORD(BaseAddr, i); + HW_REG_WORD(BackupAddr, 4) = HW_REG_WORD(BaseAddr, i+4); + } + HW_REG_HALF(BaseAddr, i) = Data_16_0; //HALFWORD write + addr = BaseAddr + i; + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + HW_REG_HALF(BaseAddr, i+2) = Data_16_1; //HALFWORD write + addr = BaseAddr + i +2; + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + HW_REG_WORD(BaseAddr, i+4) = Data_32; //WORD write + addr = BaseAddr + i +4; + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + Actual_32 = HW_REG_WORD(BaseAddr, i); //WORD read + Temp_data = (Data_16_1<<16)|Data_16_0; + if(Actual_32 != Temp_data) { + c_print("ERROR word rd: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr+i), Temp_data, Actual_32); + ErrCnt++; + } + + Actual_16_0 = HW_REG_HALF(BaseAddr, i+4); //HALFWORD read + Actual_16_1 = HW_REG_HALF(BaseAddr, i+6); //HALFWORD read + Temp_data = (Actual_16_1<<16)|Actual_16_0; + if(Data_32 != Temp_data) { + c_print("ERROR half rd: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr+i+4), Data_32, Temp_data); + ErrCnt++; + } + if(Preserve) {// Restore the targeted mem range + HW_REG_WORD(BaseAddr, i) = HW_REG_WORD(BackupAddr, 0); + HW_REG_WORD(BaseAddr, i+4) = HW_REG_WORD(BackupAddr, 4); + } + i += (((rand()%0xFFFF)|(rand()%0xFFFF)<<16)%MaxStep)+MinOffset; + i = i - (i%4); //aligned + + } + + return(ErrCnt); +} + + +// ------------------------------------------------- +// unaligned word accesses at each boundary +// All word writes followed by all reads one byte at a time +// ------------------------------------------------- +uint32_t UnalignedAccess(uint32_t BaseAddr, uint32_t TopAddr, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr) { + uint32_t ErrCnt = 0; + uint32_t i = 0; + uint32_t Data = 0; + uint32_t Actual, addr, addr_actual; + uint32_t SEED; + + if ((TopAddr-BaseAddr) < MaxStep ) MaxStep = (TopAddr-BaseAddr)%MaxStep; + + // Generate random seed with SV $urand function: + +#if (defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1)) && !defined(NO_TBOX) + TRICKBOX_IL->RND_MAX = 0xFFFF; + SEED = TRICKBOX_IL->RND_VAL; + srand(SEED); +#endif + + c_print("Write/readback unaligned words. 
Range 0x%08x - 0x%08x\n",BaseAddr,TopAddr); + i = rand()%MaxStep; + while((i+4)<(TopAddr-BaseAddr)) { + Data = (rand()%0xFFFF)|(rand()%0xFFFF)<<16; + //c_print("Wr 0x%08x to 0x%08x \n", Data, (BaseAddr+i)); + HW_REG_WORD(BaseAddr, i) = Data; + //c_print("Rd 0x%08x from 0x%08x \n", Data, (BaseAddr+i)); + Actual = HW_REG_WORD(BaseAddr, i); + if(Actual != Data) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr+i), Data, Actual); + ErrCnt++; + } + addr = BaseAddr + i; + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + i += 4+(((rand()%0xFFFF)|(rand()%0xFFFF)<<16)%MaxStep)+MinOffset; + + + } + + return(ErrCnt); +} + + +uint32_t UnalignedAccessP(uint32_t BaseAddr, uint32_t TopAddr, uint32_t BackupAddr, uint32_t Preserve, uint32_t MaxStep, uint32_t MinOffset, uint32_t* RbAddr) { + uint32_t ErrCnt = 0; + uint32_t i = 0; + uint32_t Data = 0; + uint32_t Actual, addr, addr_actual; + uint32_t SEED; + + c_print("Write/readback unaligned words. Range 0x%08x - 0x%08x.\n",BaseAddr,TopAddr); + c_print("Backup the targeted mem range\n"); + + if ((TopAddr-BaseAddr) < MaxStep ) MaxStep = (TopAddr-BaseAddr)%MaxStep; + + // Generate random seed with SV $urand function: + +#if (defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1)) && !defined(NO_TBOX) + TRICKBOX_IL->RND_MAX = 0xFFFF; + SEED = TRICKBOX_IL->RND_VAL; + srand(SEED); +#endif + + c_print("Write/readback unaligned words. Range 0x%08x - 0x%08x\n",BaseAddr,TopAddr); + i = rand()%MaxStep; + while((i+4)<(TopAddr-BaseAddr)) { + if(Preserve) HW_REG_WORD(BackupAddr, 0) = HW_REG_WORD(BaseAddr, i);// Backup the targeted mem range + Data = (rand()%0xFFFF)|(rand()%0xFFFF)<<16; + //c_print("Wr 0x%08x to 0x%08x \n", Data, (BaseAddr+i)); + HW_REG_WORD(BaseAddr, i) = Data; + //c_print("Rd 0x%08x from 0x%08x \n", Data, (BaseAddr+i)); + Actual = HW_REG_WORD(BaseAddr, i); + if(Actual != Data) { + c_print("ERROR: Addr 0x%08x, expected - 0x%08x actual - 0x%08x\n",(BaseAddr+i), Data, Actual); + ErrCnt++; + } + #if defined(SYNC_CPU_CM3_0) || defined(SYNC_CPU_CM3_1) + addr = BaseAddr + i; + //read back address + if(RbAddr != NULL) + { + addr_actual = *(RbAddr); + if(addr_actual != addr){ + c_print("ERROR: Addr read back, expected - 0x%08x actual - 0x%08x\n", addr, addr_actual); + ErrCnt++; + } + } + #endif + + if(Preserve) HW_REG_WORD(BaseAddr, i) = HW_REG_WORD(BackupAddr, 0);// Restore the targeted mem range + i += 4+(((rand()%0xFFFF)|(rand()%0xFFFF)<<16)%MaxStep)+MinOffset; + } + + return(ErrCnt); +} + +#endif //__SYSTEM_LEVEL_FUNCTIONS_C__ + + + + diff --git a/software/lib/sw_lib/devices/include/CMSDK.h b/software/lib/sw_lib/devices/include/CMSDK.h new file mode 100644 index 0000000000000000000000000000000000000000..10e6fab6a92f05473c3133fb148bed05181b2685 --- /dev/null +++ b/software/lib/sw_lib/devices/include/CMSDK.h @@ -0,0 +1,140 @@ +#include "sys_memory_map.h" + +#if defined ( __CC_ARM ) +#pragma anon_unions +#endif + + +/*------------- Universal Asynchronous Receiver Transmitter (UART) -----------*/ +/** @addtogroup CMSDK_UART CMSDK Universal Asynchronous Receiver/Transmitter + memory mapped structure for CMSDK_UART + @{ +*/ +typedef struct +{ + volatile uint32_t DATA; /*!< Offset: 0x000 Data Register (R/W) */ + volatile uint32_t STATE; /*!< Offset: 0x004 Status Register (R/W) */ + volatile uint32_t CTRL; /*!< 
Offset: 0x008 Control Register (R/W) */ + union { + volatile uint32_t INTSTATUS; /*!< Offset: 0x00C Interrupt Status Register (R/ ) */ + volatile uint32_t INTCLEAR; /*!< Offset: 0x00C Interrupt Clear Register ( /W) */ + }; + volatile uint32_t BAUDDIV; /*!< Offset: 0x010 Baudrate Divider Register (R/W) */ + +} CMSDK_UART_TypeDef; + +/* CMSDK_UART DATA Register Definitions */ + +#define CMSDK_UART_DATA_Pos 0 /*!< CMSDK_UART_DATA_Pos: DATA Position */ +#define CMSDK_UART_DATA_Msk (0xFFul << CMSDK_UART_DATA_Pos) /*!< CMSDK_UART DATA: DATA Mask */ + +#define CMSDK_UART_STATE_RXOR_Pos 3 /*!< CMSDK_UART STATE: RXOR Position */ +#define CMSDK_UART_STATE_RXOR_Msk (0x1ul << CMSDK_UART_STATE_RXOR_Pos) /*!< CMSDK_UART STATE: RXOR Mask */ + +#define CMSDK_UART_STATE_TXOR_Pos 2 /*!< CMSDK_UART STATE: TXOR Position */ +#define CMSDK_UART_STATE_TXOR_Msk (0x1ul << CMSDK_UART_STATE_TXOR_Pos) /*!< CMSDK_UART STATE: TXOR Mask */ + +#define CMSDK_UART_STATE_RXBF_Pos 1 /*!< CMSDK_UART STATE: RXBF Position */ +#define CMSDK_UART_STATE_RXBF_Msk (0x1ul << CMSDK_UART_STATE_RXBF_Pos) /*!< CMSDK_UART STATE: RXBF Mask */ + +#define CMSDK_UART_STATE_TXBF_Pos 0 /*!< CMSDK_UART STATE: TXBF Position */ +#define CMSDK_UART_STATE_TXBF_Msk (0x1ul << CMSDK_UART_STATE_TXBF_Pos ) /*!< CMSDK_UART STATE: TXBF Mask */ + +#define CMSDK_UART_CTRL_HSTM_Pos 6 /*!< CMSDK_UART CTRL: HSTM Position */ +#define CMSDK_UART_CTRL_HSTM_Msk (0x01ul << CMSDK_UART_CTRL_HSTM_Pos) /*!< CMSDK_UART CTRL: HSTM Mask */ + +#define CMSDK_UART_CTRL_RXORIRQEN_Pos 5 /*!< CMSDK_UART CTRL: RXORIRQEN Position */ +#define CMSDK_UART_CTRL_RXORIRQEN_Msk (0x01ul << CMSDK_UART_CTRL_RXORIRQEN_Pos) /*!< CMSDK_UART CTRL: RXORIRQEN Mask */ + +#define CMSDK_UART_CTRL_TXORIRQEN_Pos 4 /*!< CMSDK_UART CTRL: TXORIRQEN Position */ +#define CMSDK_UART_CTRL_TXORIRQEN_Msk (0x01ul << CMSDK_UART_CTRL_TXORIRQEN_Pos) /*!< CMSDK_UART CTRL: TXORIRQEN Mask */ + +#define CMSDK_UART_CTRL_RXIRQEN_Pos 3 /*!< CMSDK_UART CTRL: RXIRQEN Position */ +#define CMSDK_UART_CTRL_RXIRQEN_Msk (0x01ul << CMSDK_UART_CTRL_RXIRQEN_Pos) /*!< CMSDK_UART CTRL: RXIRQEN Mask */ + +#define CMSDK_UART_CTRL_TXIRQEN_Pos 2 /*!< CMSDK_UART CTRL: TXIRQEN Position */ +#define CMSDK_UART_CTRL_TXIRQEN_Msk (0x01ul << CMSDK_UART_CTRL_TXIRQEN_Pos) /*!< CMSDK_UART CTRL: TXIRQEN Mask */ + +#define CMSDK_UART_CTRL_RXEN_Pos 1 /*!< CMSDK_UART CTRL: RXEN Position */ +#define CMSDK_UART_CTRL_RXEN_Msk (0x01ul << CMSDK_UART_CTRL_RXEN_Pos) /*!< CMSDK_UART CTRL: RXEN Mask */ + +#define CMSDK_UART_CTRL_TXEN_Pos 0 /*!< CMSDK_UART CTRL: TXEN Position */ +#define CMSDK_UART_CTRL_TXEN_Msk (0x01ul << CMSDK_UART_CTRL_TXEN_Pos) /*!< CMSDK_UART CTRL: TXEN Mask */ + +#define CMSDK_UART_INTSTATUS_RXORIRQ_Pos 3 /*!< CMSDK_UART CTRL: RXORIRQ Position */ +#define CMSDK_UART_CTRL_RXORIRQ_Msk (0x01ul << CMSDK_UART_INTSTATUS_RXORIRQ_Pos) /*!< CMSDK_UART CTRL: RXORIRQ Mask */ + +#define CMSDK_UART_CTRL_TXORIRQ_Pos 2 /*!< CMSDK_UART CTRL: TXORIRQ Position */ +#define CMSDK_UART_CTRL_TXORIRQ_Msk (0x01ul << CMSDK_UART_CTRL_TXORIRQ_Pos) /*!< CMSDK_UART CTRL: TXORIRQ Mask */ + +#define CMSDK_UART_CTRL_RXIRQ_Pos 1 /*!< CMSDK_UART CTRL: RXIRQ Position */ +#define CMSDK_UART_CTRL_RXIRQ_Msk (0x01ul << CMSDK_UART_CTRL_RXIRQ_Pos) /*!< CMSDK_UART CTRL: RXIRQ Mask */ + +#define CMSDK_UART_CTRL_TXIRQ_Pos 0 /*!< CMSDK_UART CTRL: TXIRQ Position */ +#define CMSDK_UART_CTRL_TXIRQ_Msk (0x01ul << CMSDK_UART_CTRL_TXIRQ_Pos) /*!< CMSDK_UART CTRL: TXIRQ Mask */ + +#define CMSDK_UART_BAUDDIV_Pos 0 /*!< CMSDK_UART BAUDDIV: BAUDDIV Position */ +#define CMSDK_UART_BAUDDIV_Msk 
(0xFFFFFul << CMSDK_UART_BAUDDIV_Pos) /*!< CMSDK_UART BAUDDIV: BAUDDIV Mask */ + +/*@}*/ /* end of group CMSDK_UART */ + + +#define CMSDK_UART2 ((CMSDK_UART_TypeDef *) SYS_UART0_BASE ) + +/*----------------------------- Timer (TIMER) -------------------------------*/ +/** @addtogroup CMSDK_TIMER CMSDK Timer + @{ +*/ +typedef struct +{ + __IO uint32_t CTRL; /*!< Offset: 0x000 Control Register (R/W) */ + __IO uint32_t VALUE; /*!< Offset: 0x004 Current Value Register (R/W) */ + __IO uint32_t RELOAD; /*!< Offset: 0x008 Reload Value Register (R/W) */ + union { + __I uint32_t INTSTATUS; /*!< Offset: 0x00C Interrupt Status Register (R/ ) */ + __O uint32_t INTCLEAR; /*!< Offset: 0x00C Interrupt Clear Register ( /W) */ + }; + uint32_t RESERVED[1008]; // 0x010 - 0xFD0 + __I uint32_t PID4; + __I uint32_t PID5; + __I uint32_t PID6; + __I uint32_t PID7; + __I uint32_t PID0; + __I uint32_t PID1; + __I uint32_t PID2; + __I uint32_t PID3; + __I uint32_t CID0; + __I uint32_t CID1; + __I uint32_t CID2; + __I uint32_t CID3; +} CMSDK_TIMER_TypeDef; + +/* CMSDK_TIMER CTRL Register Definitions */ + +#define CMSDK_TIMER_CTRL_IRQEN_Pos 3 /*!< CMSDK_TIMER CTRL: IRQEN Position */ +#define CMSDK_TIMER_CTRL_IRQEN_Msk (0x01ul << CMSDK_TIMER_CTRL_IRQEN_Pos) /*!< CMSDK_TIMER CTRL: IRQEN Mask */ + +#define CMSDK_TIMER_CTRL_SELEXTCLK_Pos 2 /*!< CMSDK_TIMER CTRL: SELEXTCLK Position */ +#define CMSDK_TIMER_CTRL_SELEXTCLK_Msk (0x01ul << CMSDK_TIMER_CTRL_SELEXTCLK_Pos) /*!< CMSDK_TIMER CTRL: SELEXTCLK Mask */ + +#define CMSDK_TIMER_CTRL_SELEXTEN_Pos 1 /*!< CMSDK_TIMER CTRL: SELEXTEN Position */ +#define CMSDK_TIMER_CTRL_SELEXTEN_Msk (0x01ul << CMSDK_TIMER_CTRL_SELEXTEN_Pos) /*!< CMSDK_TIMER CTRL: SELEXTEN Mask */ + +#define CMSDK_TIMER_CTRL_EN_Pos 0 /*!< CMSDK_TIMER CTRL: EN Position */ +#define CMSDK_TIMER_CTRL_EN_Msk (0x01ul << CMSDK_TIMER_CTRL_EN_Pos) /*!< CMSDK_TIMER CTRL: EN Mask */ + +#define CMSDK_TIMER_VAL_CURRENT_Pos 0 /*!< CMSDK_TIMER VALUE: CURRENT Position */ +#define CMSDK_TIMER_VAL_CURRENT_Msk (0xFFFFFFFFul << CMSDK_TIMER_VAL_CURRENT_Pos) /*!< CMSDK_TIMER VALUE: CURRENT Mask */ + +#define CMSDK_TIMER_RELOAD_VAL_Pos 0 /*!< CMSDK_TIMER RELOAD: RELOAD Position */ +#define CMSDK_TIMER_RELOAD_VAL_Msk (0xFFFFFFFFul << CMSDK_TIMER_RELOAD_VAL_Pos) /*!< CMSDK_TIMER RELOAD: RELOAD Mask */ + +#define CMSDK_TIMER_INTSTATUS_Pos 0 /*!< CMSDK_TIMER INTSTATUS: INTSTATUSPosition */ +#define CMSDK_TIMER_INTSTATUS_Msk (0x01ul << CMSDK_TIMER_INTSTATUS_Pos) /*!< CMSDK_TIMER INTSTATUS: INTSTATUSMask */ + +#define CMSDK_TIMER_INTCLEAR_Pos 0 /*!< CMSDK_TIMER INTCLEAR: INTCLEAR Position */ +#define CMSDK_TIMER_INTCLEAR_Msk (0x01ul << CMSDK_TIMER_INTCLEAR_Pos) /*!< CMSDK_TIMER INTCLEAR: INTCLEAR Mask */ + +/*@}*/ /* end of group CMSDK_TIMER */ + +#define CMSDK_TIMER0 ((CMSDK_TIMER_TypeDef *) TIMER0_BASE ) \ No newline at end of file diff --git a/software/lib/sw_lib/devices/include/arm_bf16.h b/software/lib/sw_lib/devices/include/arm_bf16.h new file mode 100644 index 0000000000000000000000000000000000000000..329ae39e625dad80cdbbc67895f0bb52655dcd0f --- /dev/null +++ b/software/lib/sw_lib/devices/include/arm_bf16.h @@ -0,0 +1,20 @@ +/*===---- arm_bf16.h - ARM BF16 intrinsics -----------------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_BF16_H +#define __ARM_BF16_H + +typedef __bf16 bfloat16_t; +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + + +#undef __ai + +#endif diff --git a/software/lib/sw_lib/devices/include/arm_neon.h b/software/lib/sw_lib/devices/include/arm_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..9aeaf71a0385782e346104db000588304ec71ed1 --- /dev/null +++ b/software/lib/sw_lib/devices/include/arm_neon.h @@ -0,0 +1,69643 @@ +/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_NEON_H +#define __ARM_NEON_H + +#ifndef __ARM_FP +#error "NEON intrinsics not available with the soft-float ABI. 
Please use -mfloat-abi=softfp or -mfloat-abi=hard" +#else + +#if !defined(__ARM_NEON) +#error "NEON support not enabled" +#else + +#include <stdint.h> + +#include <arm_bf16.h> +#include <arm_vector_types.h> +#ifdef __aarch64__ +typedef uint8_t poly8_t; +typedef uint16_t poly16_t; +typedef uint64_t poly64_t; +typedef __uint128_t poly128_t; +#else +typedef int8_t poly8_t; +typedef int16_t poly16_t; +typedef int64_t poly64_t; +#endif +typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t; +typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t; +typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t; +typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t; +typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t; +typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t; + +typedef struct poly8x8x2_t { + poly8x8_t val[2]; +} poly8x8x2_t; + +typedef struct poly8x16x2_t { + poly8x16_t val[2]; +} poly8x16x2_t; + +typedef struct poly16x4x2_t { + poly16x4_t val[2]; +} poly16x4x2_t; + +typedef struct poly16x8x2_t { + poly16x8_t val[2]; +} poly16x8x2_t; + +typedef struct poly64x1x2_t { + poly64x1_t val[2]; +} poly64x1x2_t; + +typedef struct poly64x2x2_t { + poly64x2_t val[2]; +} poly64x2x2_t; + +typedef struct poly8x8x3_t { + poly8x8_t val[3]; +} poly8x8x3_t; + +typedef struct poly8x16x3_t { + poly8x16_t val[3]; +} poly8x16x3_t; + +typedef struct poly16x4x3_t { + poly16x4_t val[3]; +} poly16x4x3_t; + +typedef struct poly16x8x3_t { + poly16x8_t val[3]; +} poly16x8x3_t; + +typedef struct poly64x1x3_t { + poly64x1_t val[3]; +} poly64x1x3_t; + +typedef struct poly64x2x3_t { + poly64x2_t val[3]; +} poly64x2x3_t; + +typedef struct poly8x8x4_t { + poly8x8_t val[4]; +} poly8x8x4_t; + +typedef struct poly8x16x4_t { + poly8x16_t val[4]; +} poly8x16x4_t; + +typedef struct poly16x4x4_t { + poly16x4_t val[4]; +} poly16x4x4_t; + +typedef struct poly16x8x4_t { + poly16x8_t val[4]; +} poly16x8x4_t; + +typedef struct poly64x1x4_t { + poly64x1_t val[4]; +} poly64x1x4_t; + +typedef struct poly64x2x4_t { + poly64x2_t val[4]; +} poly64x2x4_t; + +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#else +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#endif + +#define splat_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#else +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ 
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#else +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#else +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#else +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + 
uint8x8_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#else +#define 
splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#else +#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#else +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) 
__builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#define splat_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s8(__p0, __p1) __extension__ 
({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#define splat_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#else +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#else +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#define splat_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s16(__p0, __p1) 
__extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#else +#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#else +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret; \ +}) +#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#else +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#else +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#else +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#else +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#else +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x2_t) 
__builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#else +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#else +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 
33); \ + __ret; \ +}) +#else +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret; \ +}) +#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#else +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret; \ +}) +#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#else +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#else +#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret; \ +}) +#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; 
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, 
(int8x8_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + 
int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38); + return __ret; +} +#else +__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + 
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 
& ~__p1; + return __ret; +} +#else +__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { + poly8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { + poly16x4_t __ret; + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); + return __ret; +} +#else +__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { + poly16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { + poly8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); + return __ret; +} +#else +__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t 
__p1, poly16x8_t __p2) { + poly16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 
5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#else +__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); + return __ret; +} +#else +__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = 
(int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = 
(uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, 
__ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) 
{ + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + 
uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + 
uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vclsq_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclsq_u8(uint8x16_t __p0) { + int8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclsq_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vclsq_u32(uint32x4_t __p0) { + int32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclsq_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclsq_u16(uint16x8_t __p0) { + int16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcls_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcls_u8(uint8x8_t __p0) { + int8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcls_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcls_u32(uint32x2_t __p0) { + int32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcls_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcls_u16(uint16x4_t __p0) { + int16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + 
return __ret; +} +#else +__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 
0); + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __ret; + 
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
1, 0); + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcnt_s8(int8x8_t __p0) { + 
int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai uint64x2_t 
vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret 
= (int64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int16x4_t)(__promote); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (float32x2_t) 
__builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t 
vcvtq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \ + poly8x8_t __ret_0; \ + poly8x8_t __s0_0 = __p0_0; \ + __ret_0 = splat_lane_p8(__s0_0, __p1_0); \ + __ret_0; \ +}) +#else +#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \ + poly8x8_t __ret_1; \ + poly8x8_t __s0_1 = __p0_1; \ + poly8x8_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \ + __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_1; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \ + poly16x4_t __ret_2; \ + poly16x4_t __s0_2 = __p0_2; \ + __ret_2 = splat_lane_p16(__s0_2, __p1_2); \ + __ret_2; \ +}) +#else +#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \ + poly16x4_t __ret_3; \ + poly16x4_t __s0_3 = __p0_3; \ + poly16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \ + __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \ + __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \ + __ret_3; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \ + poly8x16_t __ret_4; \ + poly8x8_t __s0_4 = __p0_4; \ + __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \ + __ret_4; \ +}) +#else +#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \ + poly8x16_t __ret_5; \ + poly8x8_t __s0_5 = __p0_5; \ + poly8x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \ + __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \ + poly16x8_t __ret_6; \ + poly16x4_t __s0_6 = __p0_6; \ + __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \ + __ret_6; \ +}) +#else +#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \ + poly16x8_t __ret_7; \ + poly16x4_t __s0_7 = __p0_7; \ + poly16x4_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \ + __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \ + __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_7; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \ + uint8x16_t __ret_8; \ + uint8x8_t __s0_8 = __p0_8; \ + __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \ + __ret_8; \ +}) +#else +#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \ + uint8x16_t __ret_9; \ + uint8x8_t __s0_9 = __p0_9; \ + uint8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \ + __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_9; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \ + uint32x4_t __ret_10; \ + uint32x2_t __s0_10 = __p0_10; \ + __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \ + __ret_10; \ +}) +#else +#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \ + uint32x4_t __ret_11; \ + uint32x2_t __s0_11 = __p0_11; \ + uint32x2_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \ + __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \ + __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \ + __ret_11; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \ + uint64x2_t __ret_12; \ + uint64x1_t __s0_12 = __p0_12; \ + __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \ + __ret_12; \ +}) +#else +#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \ + uint64x2_t __ret_13; \ + uint64x1_t __s0_13 = __p0_13; \ + __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \ + __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \ + __ret_13; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \ + uint16x8_t __ret_14; \ + uint16x4_t __s0_14 = __p0_14; \ + __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \ + __ret_14; \ +}) +#else +#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \ + uint16x8_t __ret_15; \ + uint16x4_t __s0_15 = __p0_15; \ + uint16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ + __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \ + __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_15; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \ + int8x16_t __ret_16; \ + int8x8_t __s0_16 = __p0_16; \ + __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \ + __ret_16; \ +}) +#else +#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \ + int8x16_t __ret_17; \ + int8x8_t __s0_17 = __p0_17; \ + int8x8_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \ + __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_17; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \ + float32x4_t __ret_18; \ + float32x2_t __s0_18 = __p0_18; \ + __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \ + __ret_18; \ +}) +#else +#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \ + float32x4_t __ret_19; \ + float32x2_t __s0_19 = __p0_19; \ + float32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ + __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \ + __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \ + __ret_19; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f16(__p0_20, __p1_20) __extension__ ({ \ + float16x8_t __ret_20; \ + float16x4_t __s0_20 = __p0_20; \ + __ret_20 = splatq_lane_f16(__s0_20, __p1_20); \ + __ret_20; \ +}) +#else +#define vdupq_lane_f16(__p0_21, __p1_21) __extension__ ({ \ + float16x8_t __ret_21; \ + float16x4_t __s0_21 = __p0_21; \ + float16x4_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 3, 2, 1, 0); \ + __ret_21 = __noswap_splatq_lane_f16(__rev0_21, __p1_21); \ + __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_21; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s32(__p0_22, __p1_22) __extension__ ({ \ + int32x4_t __ret_22; \ + int32x2_t __s0_22 = __p0_22; \ + __ret_22 = splatq_lane_s32(__s0_22, __p1_22); \ + __ret_22; \ +}) +#else +#define vdupq_lane_s32(__p0_23, __p1_23) __extension__ ({ \ + int32x4_t __ret_23; \ + int32x2_t __s0_23 = __p0_23; \ + int32x2_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 1, 0); \ + __ret_23 = __noswap_splatq_lane_s32(__rev0_23, __p1_23); \ + __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \ + __ret_23; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s64(__p0_24, __p1_24) __extension__ ({ \ + int64x2_t __ret_24; \ + int64x1_t __s0_24 = __p0_24; \ + __ret_24 = splatq_lane_s64(__s0_24, __p1_24); \ + __ret_24; \ +}) +#else +#define vdupq_lane_s64(__p0_25, __p1_25) __extension__ ({ \ + int64x2_t __ret_25; \ + int64x1_t __s0_25 = __p0_25; \ + __ret_25 = __noswap_splatq_lane_s64(__s0_25, __p1_25); \ + __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \ + __ret_25; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s16(__p0_26, __p1_26) __extension__ ({ \ + int16x8_t __ret_26; \ + int16x4_t __s0_26 = __p0_26; \ + __ret_26 = splatq_lane_s16(__s0_26, __p1_26); \ + __ret_26; \ +}) +#else +#define vdupq_lane_s16(__p0_27, __p1_27) __extension__ ({ \ + int16x8_t __ret_27; \ + int16x4_t __s0_27 = __p0_27; \ + int16x4_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \ + __ret_27 = __noswap_splatq_lane_s16(__rev0_27, __p1_27); \ + __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_27; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u8(__p0_28, __p1_28) __extension__ ({ \ + uint8x8_t __ret_28; \ + uint8x8_t __s0_28 = __p0_28; \ + __ret_28 = splat_lane_u8(__s0_28, __p1_28); \ + __ret_28; \ +}) +#else +#define vdup_lane_u8(__p0_29, __p1_29) __extension__ ({ \ + uint8x8_t __ret_29; \ + uint8x8_t __s0_29 = __p0_29; \ + uint8x8_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_29 = __noswap_splat_lane_u8(__rev0_29, __p1_29); \ + __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_29; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u32(__p0_30, __p1_30) __extension__ ({ \ + uint32x2_t 
__ret_30; \ + uint32x2_t __s0_30 = __p0_30; \ + __ret_30 = splat_lane_u32(__s0_30, __p1_30); \ + __ret_30; \ +}) +#else +#define vdup_lane_u32(__p0_31, __p1_31) __extension__ ({ \ + uint32x2_t __ret_31; \ + uint32x2_t __s0_31 = __p0_31; \ + uint32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \ + __ret_31 = __noswap_splat_lane_u32(__rev0_31, __p1_31); \ + __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \ + __ret_31; \ +}) +#endif + +#define vdup_lane_u64(__p0_32, __p1_32) __extension__ ({ \ + uint64x1_t __ret_32; \ + uint64x1_t __s0_32 = __p0_32; \ + __ret_32 = splat_lane_u64(__s0_32, __p1_32); \ + __ret_32; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u16(__p0_33, __p1_33) __extension__ ({ \ + uint16x4_t __ret_33; \ + uint16x4_t __s0_33 = __p0_33; \ + __ret_33 = splat_lane_u16(__s0_33, __p1_33); \ + __ret_33; \ +}) +#else +#define vdup_lane_u16(__p0_34, __p1_34) __extension__ ({ \ + uint16x4_t __ret_34; \ + uint16x4_t __s0_34 = __p0_34; \ + uint16x4_t __rev0_34; __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 3, 2, 1, 0); \ + __ret_34 = __noswap_splat_lane_u16(__rev0_34, __p1_34); \ + __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 3, 2, 1, 0); \ + __ret_34; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s8(__p0_35, __p1_35) __extension__ ({ \ + int8x8_t __ret_35; \ + int8x8_t __s0_35 = __p0_35; \ + __ret_35 = splat_lane_s8(__s0_35, __p1_35); \ + __ret_35; \ +}) +#else +#define vdup_lane_s8(__p0_36, __p1_36) __extension__ ({ \ + int8x8_t __ret_36; \ + int8x8_t __s0_36 = __p0_36; \ + int8x8_t __rev0_36; __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_36 = __noswap_splat_lane_s8(__rev0_36, __p1_36); \ + __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_36; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f32(__p0_37, __p1_37) __extension__ ({ \ + float32x2_t __ret_37; \ + float32x2_t __s0_37 = __p0_37; \ + __ret_37 = splat_lane_f32(__s0_37, __p1_37); \ + __ret_37; \ +}) +#else +#define vdup_lane_f32(__p0_38, __p1_38) __extension__ ({ \ + float32x2_t __ret_38; \ + float32x2_t __s0_38 = __p0_38; \ + float32x2_t __rev0_38; __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \ + __ret_38 = __noswap_splat_lane_f32(__rev0_38, __p1_38); \ + __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \ + __ret_38; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f16(__p0_39, __p1_39) __extension__ ({ \ + float16x4_t __ret_39; \ + float16x4_t __s0_39 = __p0_39; \ + __ret_39 = splat_lane_f16(__s0_39, __p1_39); \ + __ret_39; \ +}) +#else +#define vdup_lane_f16(__p0_40, __p1_40) __extension__ ({ \ + float16x4_t __ret_40; \ + float16x4_t __s0_40 = __p0_40; \ + float16x4_t __rev0_40; __rev0_40 = __builtin_shufflevector(__s0_40, __s0_40, 3, 2, 1, 0); \ + __ret_40 = __noswap_splat_lane_f16(__rev0_40, __p1_40); \ + __ret_40 = __builtin_shufflevector(__ret_40, __ret_40, 3, 2, 1, 0); \ + __ret_40; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s32(__p0_41, __p1_41) __extension__ ({ \ + int32x2_t __ret_41; \ + int32x2_t __s0_41 = __p0_41; \ + __ret_41 = splat_lane_s32(__s0_41, __p1_41); \ + __ret_41; \ +}) +#else +#define vdup_lane_s32(__p0_42, __p1_42) __extension__ ({ \ + int32x2_t __ret_42; \ + int32x2_t __s0_42 = __p0_42; \ + int32x2_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 1, 0); \ + __ret_42 = __noswap_splat_lane_s32(__rev0_42, __p1_42); \ + __ret_42 = 
__builtin_shufflevector(__ret_42, __ret_42, 1, 0); \ + __ret_42; \ +}) +#endif + +#define vdup_lane_s64(__p0_43, __p1_43) __extension__ ({ \ + int64x1_t __ret_43; \ + int64x1_t __s0_43 = __p0_43; \ + __ret_43 = splat_lane_s64(__s0_43, __p1_43); \ + __ret_43; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s16(__p0_44, __p1_44) __extension__ ({ \ + int16x4_t __ret_44; \ + int16x4_t __s0_44 = __p0_44; \ + __ret_44 = splat_lane_s16(__s0_44, __p1_44); \ + __ret_44; \ +}) +#else +#define vdup_lane_s16(__p0_45, __p1_45) __extension__ ({ \ + int16x4_t __ret_45; \ + int16x4_t __s0_45 = __p0_45; \ + int16x4_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \ + __ret_45 = __noswap_splat_lane_s16(__rev0_45, __p1_45); \ + __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \ + __ret_45; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t 
__ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vdup_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdup_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vdup_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, 
__ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 
1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + 
uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ + __ret; \ +}) +#else +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + 
uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \ + __ret; \ +}) +#else +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x4_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} 
+__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x1_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai float16x4_t vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) 
__builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) 
__builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#define vget_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) 
__builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#define vget_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x4_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x1_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vget_low_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai int8x8_t vget_low_s8(int8x16_t __p0) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vget_low_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai float32x2_t vget_low_f32(float32x4_t __p0) { + float32x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vget_low_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai float16x4_t vget_low_f16(float16x8_t __p0) { + float16x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t 
vget_low_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai int32x2_t vget_low_s32(int32x4_t __p0) { + int32x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vget_low_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai int64x1_t vget_low_s64(int64x2_t __p0) { + int64x1_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vget_low_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai int16x4_t vget_low_s16(int16x8_t __p0) { + int16x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) 
__builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ + __ret; \ +}) +#else 
+#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) 
__builtin_neon_vld1q_v(__p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) 
__builtin_neon_vld1_v(__p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ + __ret; \ +}) +#else +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ + __ret; \ +}) +#else +#define vld1_dup_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ + __ret; \ +}) +#else +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ + __ret; \ +}) +#else +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ + __ret; \ +}) +#else +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ + __ret; \ +}) +#else +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ + __ret; \ +}) +#else +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ + __ret; \ +}) +#else +#define vld1q_dup_u16(__p0) __extension__ ({ \ + 
uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ + __ret; \ +}) +#else +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ + __ret; \ +}) +#else +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ + __ret; \ +}) +#else +#define vld1q_dup_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ + __ret; \ +}) +#else +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ + __ret; \ +}) +#else +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ + __ret; \ +}) +#else +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ + __ret; \ +}) +#else +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_dup_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ + __ret; \ +}) +#else +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ 
+}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ + __ret; \ +}) +#else +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ + __ret; \ +}) +#else +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ + __ret; \ +}) +#else +#define vld1_dup_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_dup_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ + __ret; \ +}) +#else +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else 
+#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ + __ret; \ +}) +#else +#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ + __ret; \ +}) +#else +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s1 = __p1; \ + 
__ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; 
\ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64_x2(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64_x2(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 
1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x3(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x3(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 
0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x3(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x3(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64_x3(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x3(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x3(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64_x3(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x4(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x4(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x4(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x4(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x4(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x4(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x4(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x4(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x4(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x4(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x4(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x4(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 
0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x4(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x4(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x4(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x4(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x4(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x4(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x4(__p0) __extension__ ({ \ + 
uint8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x4(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x4(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x4(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64_x4(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x4(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x4(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x4(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x4(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x4(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x4(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x4(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x4(__p0) __extension__ ({ \ + 
int32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64_x4(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x4(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x4(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_dup_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld2_dup_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_dup_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __ret; \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ + __ret; \ +}) +#else +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __ret; \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __ret; \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ + __ret; \ +}) +#else +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __ret; \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __ret; \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ + __ret; \ +}) +#else +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __ret; \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __ret; \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ + __ret; \ +}) +#else +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __ret; \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __ret; \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ + __ret; \ +}) +#else +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __ret; \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __ret; \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ + __ret; \ +}) +#else +#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __ret; \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __ret; \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ + __ret; \ +}) +#else +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __ret; \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __ret; \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ + __ret; \ +}) +#else +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __ret; \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __ret; \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ + __ret; \ +}) +#else +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __ret; \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __ret; \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ + __ret; \ +}) +#else +#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __ret; \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __ret; \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ + __ret; \ +}) +#else +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __ret; \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __ret; \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ + __ret; \ +}) +#else +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __ret; \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __ret; \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ + __ret; \ +}) +#else +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __ret; \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __ret; \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ + __ret; \ +}) +#else +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __ret; \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __ret; \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ + __ret; \ +}) +#else +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __ret; \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; 
\ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + 
__builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __ret; \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ + __ret; \ +}) +#else +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __ret; \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __ret; \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ + __ret; \ +}) +#else +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __ret; \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __ret; \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ + __ret; \ +}) +#else +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __ret; \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u32(__p0, 
__p1, __p2) __extension__ ({ \ + uint32x4x3_t __ret; \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ + __ret; \ +}) +#else +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __ret; \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __ret; \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ + __ret; \ +}) +#else +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __ret; \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __ret; \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ + __ret; \ +}) +#else +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __ret; \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __ret; \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 
(int8x16_t)__s1.val[2], __p2, 34); \ + __ret; \ +}) +#else +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __ret; \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __ret; \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ + __ret; \ +}) +#else +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __ret; \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __ret; \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ + __ret; \ +}) +#else +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __ret; \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __ret; \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ + __ret; \ +}) +#else +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __ret; \ + 
uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __ret; \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ + __ret; \ +}) +#else +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __ret; \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __ret; \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ + __ret; \ +}) +#else +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __ret; \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __ret; \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ + __ret; \ +}) +#else +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __ret; \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], 
__s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __ret; \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ + __ret; \ +}) +#else +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __ret; \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __ret; \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ + __ret; \ +}) +#else +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __ret; \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + 
__ret; \ +}) +#endif + +#define vld4_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s16(__p0) 
__extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __ret; \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ + __ret; \ +}) +#else +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __ret; \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __ret; \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ + __ret; \ +}) +#else +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __ret; \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __ret; \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 
(int8x16_t)__s1.val[3], __p2, 37); \ + __ret; \ +}) +#else +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __ret; \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __ret; \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ + __ret; \ +}) +#else +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __ret; \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __ret; \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ + __ret; \ +}) +#else +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __ret; \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 
1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __ret; \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ + __ret; \ +}) +#else +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __ret; \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __ret; \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ + __ret; \ +}) +#else +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __ret; \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __ret; \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ + __ret; \ +}) +#else +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __ret; \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __ret; \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ + __ret; \ +}) +#else +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __ret; \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __ret; \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ + __ret; \ +}) +#else +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __ret; \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t 
__ret; \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ + __ret; \ +}) +#else +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __ret; \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __ret; \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ + __ret; \ +}) +#else +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __ret; \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __ret; \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ + __ret; \ +}) +#else +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __ret; \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __ret; \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ + __ret; \ +}) +#else +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __ret; \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __ret; \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ + __ret; \ +}) +#else +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __ret; \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) 
__builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) 
__builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + 
__ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = 
__p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, 
int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ + uint32x4_t __ret_46; \ + uint32x4_t __s0_46 = __p0_46; \ + uint32x4_t __s1_46 = __p1_46; \ + uint32x2_t __s2_46 = __p2_46; \ + __ret_46 = __s0_46 + __s1_46 * splatq_lane_u32(__s2_46, __p3_46); \ + __ret_46; \ +}) +#else +#define vmlaq_lane_u32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ + uint32x4_t __ret_47; \ + uint32x4_t __s0_47 = __p0_47; \ + uint32x4_t __s1_47 = __p1_47; \ + uint32x2_t __s2_47 = __p2_47; \ + uint32x4_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \ + uint32x4_t __rev1_47; __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \ + uint32x2_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \ + __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_u32(__rev2_47, __p3_47); \ + __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \ + __ret_47; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ + uint16x8_t __ret_48; \ + uint16x8_t __s0_48 = __p0_48; \ + uint16x8_t __s1_48 = __p1_48; \ + uint16x4_t __s2_48 = __p2_48; \ + __ret_48 = __s0_48 + __s1_48 * splatq_lane_u16(__s2_48, __p3_48); \ + __ret_48; \ +}) +#else +#define vmlaq_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ + 
uint16x8_t __ret_49; \ + uint16x8_t __s0_49 = __p0_49; \ + uint16x8_t __s1_49 = __p1_49; \ + uint16x4_t __s2_49 = __p2_49; \ + uint16x8_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_49; __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 3, 2, 1, 0); \ + __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_u16(__rev2_49, __p3_49); \ + __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_49; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_f32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ + float32x4_t __ret_50; \ + float32x4_t __s0_50 = __p0_50; \ + float32x4_t __s1_50 = __p1_50; \ + float32x2_t __s2_50 = __p2_50; \ + __ret_50 = __s0_50 + __s1_50 * splatq_lane_f32(__s2_50, __p3_50); \ + __ret_50; \ +}) +#else +#define vmlaq_lane_f32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ + float32x4_t __ret_51; \ + float32x4_t __s0_51 = __p0_51; \ + float32x4_t __s1_51 = __p1_51; \ + float32x2_t __s2_51 = __p2_51; \ + float32x4_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \ + float32x4_t __rev1_51; __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 3, 2, 1, 0); \ + float32x2_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \ + __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_f32(__rev2_51, __p3_51); \ + __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \ + __ret_51; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ + int32x4_t __ret_52; \ + int32x4_t __s0_52 = __p0_52; \ + int32x4_t __s1_52 = __p1_52; \ + int32x2_t __s2_52 = __p2_52; \ + __ret_52 = __s0_52 + __s1_52 * splatq_lane_s32(__s2_52, __p3_52); \ + __ret_52; \ +}) +#else +#define vmlaq_lane_s32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ + int32x4_t __ret_53; \ + int32x4_t __s0_53 = __p0_53; \ + int32x4_t __s1_53 = __p1_53; \ + int32x2_t __s2_53 = __p2_53; \ + int32x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \ + int32x4_t __rev1_53; __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 3, 2, 1, 0); \ + int32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ + __ret_53 = __rev0_53 + __rev1_53 * __noswap_splatq_lane_s32(__rev2_53, __p3_53); \ + __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \ + __ret_53; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ + int16x8_t __ret_54; \ + int16x8_t __s0_54 = __p0_54; \ + int16x8_t __s1_54 = __p1_54; \ + int16x4_t __s2_54 = __p2_54; \ + __ret_54 = __s0_54 + __s1_54 * splatq_lane_s16(__s2_54, __p3_54); \ + __ret_54; \ +}) +#else +#define vmlaq_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ + int16x8_t __ret_55; \ + int16x8_t __s0_55 = __p0_55; \ + int16x8_t __s1_55 = __p1_55; \ + int16x4_t __s2_55 = __p2_55; \ + int16x8_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \ + __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_s16(__rev2_55, __p3_55); \ + __ret_55 = 
__builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_55; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ + uint32x2_t __ret_56; \ + uint32x2_t __s0_56 = __p0_56; \ + uint32x2_t __s1_56 = __p1_56; \ + uint32x2_t __s2_56 = __p2_56; \ + __ret_56 = __s0_56 + __s1_56 * splat_lane_u32(__s2_56, __p3_56); \ + __ret_56; \ +}) +#else +#define vmla_lane_u32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ + uint32x2_t __ret_57; \ + uint32x2_t __s0_57 = __p0_57; \ + uint32x2_t __s1_57 = __p1_57; \ + uint32x2_t __s2_57 = __p2_57; \ + uint32x2_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \ + uint32x2_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \ + uint32x2_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \ + __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_u32(__rev2_57, __p3_57); \ + __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \ + __ret_57; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ + uint16x4_t __ret_58; \ + uint16x4_t __s0_58 = __p0_58; \ + uint16x4_t __s1_58 = __p1_58; \ + uint16x4_t __s2_58 = __p2_58; \ + __ret_58 = __s0_58 + __s1_58 * splat_lane_u16(__s2_58, __p3_58); \ + __ret_58; \ +}) +#else +#define vmla_lane_u16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ + uint16x4_t __ret_59; \ + uint16x4_t __s0_59 = __p0_59; \ + uint16x4_t __s1_59 = __p1_59; \ + uint16x4_t __s2_59 = __p2_59; \ + uint16x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ + uint16x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \ + uint16x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \ + __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_u16(__rev2_59, __p3_59); \ + __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ + __ret_59; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_f32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ + float32x2_t __ret_60; \ + float32x2_t __s0_60 = __p0_60; \ + float32x2_t __s1_60 = __p1_60; \ + float32x2_t __s2_60 = __p2_60; \ + __ret_60 = __s0_60 + __s1_60 * splat_lane_f32(__s2_60, __p3_60); \ + __ret_60; \ +}) +#else +#define vmla_lane_f32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ + float32x2_t __ret_61; \ + float32x2_t __s0_61 = __p0_61; \ + float32x2_t __s1_61 = __p1_61; \ + float32x2_t __s2_61 = __p2_61; \ + float32x2_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 1, 0); \ + float32x2_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 1, 0); \ + float32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \ + __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_f32(__rev2_61, __p3_61); \ + __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 1, 0); \ + __ret_61; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ + int32x2_t __ret_62; \ + int32x2_t __s0_62 = __p0_62; \ + int32x2_t __s1_62 = __p1_62; \ + int32x2_t __s2_62 = __p2_62; \ + __ret_62 = __s0_62 + __s1_62 * splat_lane_s32(__s2_62, __p3_62); \ + __ret_62; \ +}) +#else +#define vmla_lane_s32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ + int32x2_t __ret_63; \ + int32x2_t __s0_63 = __p0_63; \ + int32x2_t __s1_63 = __p1_63; \ + int32x2_t __s2_63 = 
__p2_63; \ + int32x2_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \ + int32x2_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 1, 0); \ + int32x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ + __ret_63 = __rev0_63 + __rev1_63 * __noswap_splat_lane_s32(__rev2_63, __p3_63); \ + __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \ + __ret_63; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ + int16x4_t __ret_64; \ + int16x4_t __s0_64 = __p0_64; \ + int16x4_t __s1_64 = __p1_64; \ + int16x4_t __s2_64 = __p2_64; \ + __ret_64 = __s0_64 + __s1_64 * splat_lane_s16(__s2_64, __p3_64); \ + __ret_64; \ +}) +#else +#define vmla_lane_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ + int16x4_t __ret_65; \ + int16x4_t __s0_65 = __p0_65; \ + int16x4_t __s1_65 = __p1_65; \ + int16x4_t __s2_65 = __p2_65; \ + int16x4_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 3, 2, 1, 0); \ + int16x4_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 3, 2, 1, 0); \ + int16x4_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \ + __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_s16(__rev2_65, __p3_65); \ + __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 3, 2, 1, 0); \ + __ret_65; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + __ret = __p0 + 
__p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + 
uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ + uint32x4_t __ret_66; \ + uint32x4_t __s0_66 = __p0_66; \ + uint32x4_t __s1_66 = __p1_66; \ + uint32x2_t __s2_66 = __p2_66; \ + __ret_66 = __s0_66 - __s1_66 * splatq_lane_u32(__s2_66, __p3_66); \ + __ret_66; \ +}) +#else +#define vmlsq_lane_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ + uint32x4_t __ret_67; \ + uint32x4_t __s0_67 = __p0_67; \ + uint32x4_t __s1_67 = __p1_67; \ + uint32x2_t __s2_67 = __p2_67; \ + uint32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ + uint32x4_t 
__rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ + uint32x2_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \ + __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_u32(__rev2_67, __p3_67); \ + __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ + __ret_67; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ + uint16x8_t __ret_68; \ + uint16x8_t __s0_68 = __p0_68; \ + uint16x8_t __s1_68 = __p1_68; \ + uint16x4_t __s2_68 = __p2_68; \ + __ret_68 = __s0_68 - __s1_68 * splatq_lane_u16(__s2_68, __p3_68); \ + __ret_68; \ +}) +#else +#define vmlsq_lane_u16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ + uint16x8_t __ret_69; \ + uint16x8_t __s0_69 = __p0_69; \ + uint16x8_t __s1_69 = __p1_69; \ + uint16x4_t __s2_69 = __p2_69; \ + uint16x8_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \ + __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_u16(__rev2_69, __p3_69); \ + __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_69; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ + float32x4_t __ret_70; \ + float32x4_t __s0_70 = __p0_70; \ + float32x4_t __s1_70 = __p1_70; \ + float32x2_t __s2_70 = __p2_70; \ + __ret_70 = __s0_70 - __s1_70 * splatq_lane_f32(__s2_70, __p3_70); \ + __ret_70; \ +}) +#else +#define vmlsq_lane_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ + float32x4_t __ret_71; \ + float32x4_t __s0_71 = __p0_71; \ + float32x4_t __s1_71 = __p1_71; \ + float32x2_t __s2_71 = __p2_71; \ + float32x4_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \ + float32x4_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 3, 2, 1, 0); \ + float32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \ + __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_f32(__rev2_71, __p3_71); \ + __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \ + __ret_71; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ + int32x4_t __ret_72; \ + int32x4_t __s0_72 = __p0_72; \ + int32x4_t __s1_72 = __p1_72; \ + int32x2_t __s2_72 = __p2_72; \ + __ret_72 = __s0_72 - __s1_72 * splatq_lane_s32(__s2_72, __p3_72); \ + __ret_72; \ +}) +#else +#define vmlsq_lane_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ + int32x4_t __ret_73; \ + int32x4_t __s0_73 = __p0_73; \ + int32x4_t __s1_73 = __p1_73; \ + int32x2_t __s2_73 = __p2_73; \ + int32x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \ + int32x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \ + int32x2_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \ + __ret_73 = __rev0_73 - __rev1_73 * __noswap_splatq_lane_s32(__rev2_73, __p3_73); \ + __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \ + __ret_73; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ + int16x8_t __ret_74; \ + int16x8_t __s0_74 = __p0_74; \ + int16x8_t __s1_74 = 
__p1_74; \ + int16x4_t __s2_74 = __p2_74; \ + __ret_74 = __s0_74 - __s1_74 * splatq_lane_s16(__s2_74, __p3_74); \ + __ret_74; \ +}) +#else +#define vmlsq_lane_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ + int16x8_t __ret_75; \ + int16x8_t __s0_75 = __p0_75; \ + int16x8_t __s1_75 = __p1_75; \ + int16x4_t __s2_75 = __p2_75; \ + int16x8_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ + __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_s16(__rev2_75, __p3_75); \ + __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_75; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ + uint32x2_t __ret_76; \ + uint32x2_t __s0_76 = __p0_76; \ + uint32x2_t __s1_76 = __p1_76; \ + uint32x2_t __s2_76 = __p2_76; \ + __ret_76 = __s0_76 - __s1_76 * splat_lane_u32(__s2_76, __p3_76); \ + __ret_76; \ +}) +#else +#define vmls_lane_u32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ + uint32x2_t __ret_77; \ + uint32x2_t __s0_77 = __p0_77; \ + uint32x2_t __s1_77 = __p1_77; \ + uint32x2_t __s2_77 = __p2_77; \ + uint32x2_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \ + uint32x2_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \ + uint32x2_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \ + __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_u32(__rev2_77, __p3_77); \ + __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \ + __ret_77; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ + uint16x4_t __ret_78; \ + uint16x4_t __s0_78 = __p0_78; \ + uint16x4_t __s1_78 = __p1_78; \ + uint16x4_t __s2_78 = __p2_78; \ + __ret_78 = __s0_78 - __s1_78 * splat_lane_u16(__s2_78, __p3_78); \ + __ret_78; \ +}) +#else +#define vmls_lane_u16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ + uint16x4_t __ret_79; \ + uint16x4_t __s0_79 = __p0_79; \ + uint16x4_t __s1_79 = __p1_79; \ + uint16x4_t __s2_79 = __p2_79; \ + uint16x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \ + uint16x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \ + uint16x4_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 3, 2, 1, 0); \ + __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_u16(__rev2_79, __p3_79); \ + __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \ + __ret_79; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ + float32x2_t __ret_80; \ + float32x2_t __s0_80 = __p0_80; \ + float32x2_t __s1_80 = __p1_80; \ + float32x2_t __s2_80 = __p2_80; \ + __ret_80 = __s0_80 - __s1_80 * splat_lane_f32(__s2_80, __p3_80); \ + __ret_80; \ +}) +#else +#define vmls_lane_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ + float32x2_t __ret_81; \ + float32x2_t __s0_81 = __p0_81; \ + float32x2_t __s1_81 = __p1_81; \ + float32x2_t __s2_81 = __p2_81; \ + float32x2_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \ + float32x2_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 1, 0); \ + float32x2_t __rev2_81; __rev2_81 = 
__builtin_shufflevector(__s2_81, __s2_81, 1, 0); \ + __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_f32(__rev2_81, __p3_81); \ + __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \ + __ret_81; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ + int32x2_t __ret_82; \ + int32x2_t __s0_82 = __p0_82; \ + int32x2_t __s1_82 = __p1_82; \ + int32x2_t __s2_82 = __p2_82; \ + __ret_82 = __s0_82 - __s1_82 * splat_lane_s32(__s2_82, __p3_82); \ + __ret_82; \ +}) +#else +#define vmls_lane_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ + int32x2_t __ret_83; \ + int32x2_t __s0_83 = __p0_83; \ + int32x2_t __s1_83 = __p1_83; \ + int32x2_t __s2_83 = __p2_83; \ + int32x2_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \ + int32x2_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \ + int32x2_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 1, 0); \ + __ret_83 = __rev0_83 - __rev1_83 * __noswap_splat_lane_s32(__rev2_83, __p3_83); \ + __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \ + __ret_83; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ + int16x4_t __ret_84; \ + int16x4_t __s0_84 = __p0_84; \ + int16x4_t __s1_84 = __p1_84; \ + int16x4_t __s2_84 = __p2_84; \ + __ret_84 = __s0_84 - __s1_84 * splat_lane_s16(__s2_84, __p3_84); \ + __ret_84; \ +}) +#else +#define vmls_lane_s16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ + int16x4_t __ret_85; \ + int16x4_t __s0_85 = __p0_85; \ + int16x4_t __s1_85 = __p1_85; \ + int16x4_t __s2_85 = __p2_85; \ + int16x4_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 3, 2, 1, 0); \ + int16x4_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \ + int16x4_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \ + __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_s16(__rev2_85, __p3_85); \ + __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \ + __ret_85; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, 
__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, 
__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vmov_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmov_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmov_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vmov_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) 
__builtin_neon_vmovl_v((int8x8_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmulq_s32(int32x4_t __p0, 
int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u32(__p0_86, __p1_86, __p2_86) __extension__ ({ \ + uint32x4_t __ret_86; \ + uint32x4_t __s0_86 = __p0_86; \ + uint32x2_t __s1_86 = __p1_86; \ + __ret_86 = __s0_86 * splatq_lane_u32(__s1_86, __p2_86); \ + __ret_86; \ +}) +#else +#define vmulq_lane_u32(__p0_87, __p1_87, __p2_87) __extension__ ({ \ + uint32x4_t __ret_87; \ + uint32x4_t __s0_87 = __p0_87; \ + uint32x2_t __s1_87 = __p1_87; \ + uint32x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ + uint32x2_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \ + __ret_87 = __rev0_87 * __noswap_splatq_lane_u32(__rev1_87, __p2_87); \ + __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ + __ret_87; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u16(__p0_88, __p1_88, __p2_88) __extension__ ({ \ + uint16x8_t __ret_88; \ + uint16x8_t __s0_88 = __p0_88; \ + uint16x4_t __s1_88 = __p1_88; \ + __ret_88 = __s0_88 * splatq_lane_u16(__s1_88, __p2_88); \ + __ret_88; \ +}) +#else +#define vmulq_lane_u16(__p0_89, __p1_89, __p2_89) __extension__ ({ \ + 
uint16x8_t __ret_89; \ + uint16x8_t __s0_89 = __p0_89; \ + uint16x4_t __s1_89 = __p1_89; \ + uint16x8_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 3, 2, 1, 0); \ + __ret_89 = __rev0_89 * __noswap_splatq_lane_u16(__rev1_89, __p2_89); \ + __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_89; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f32(__p0_90, __p1_90, __p2_90) __extension__ ({ \ + float32x4_t __ret_90; \ + float32x4_t __s0_90 = __p0_90; \ + float32x2_t __s1_90 = __p1_90; \ + __ret_90 = __s0_90 * splatq_lane_f32(__s1_90, __p2_90); \ + __ret_90; \ +}) +#else +#define vmulq_lane_f32(__p0_91, __p1_91, __p2_91) __extension__ ({ \ + float32x4_t __ret_91; \ + float32x4_t __s0_91 = __p0_91; \ + float32x2_t __s1_91 = __p1_91; \ + float32x4_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 3, 2, 1, 0); \ + float32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \ + __ret_91 = __rev0_91 * __noswap_splatq_lane_f32(__rev1_91, __p2_91); \ + __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 3, 2, 1, 0); \ + __ret_91; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s32(__p0_92, __p1_92, __p2_92) __extension__ ({ \ + int32x4_t __ret_92; \ + int32x4_t __s0_92 = __p0_92; \ + int32x2_t __s1_92 = __p1_92; \ + __ret_92 = __s0_92 * splatq_lane_s32(__s1_92, __p2_92); \ + __ret_92; \ +}) +#else +#define vmulq_lane_s32(__p0_93, __p1_93, __p2_93) __extension__ ({ \ + int32x4_t __ret_93; \ + int32x4_t __s0_93 = __p0_93; \ + int32x2_t __s1_93 = __p1_93; \ + int32x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ + int32x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ + __ret_93 = __rev0_93 * __noswap_splatq_lane_s32(__rev1_93, __p2_93); \ + __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ + __ret_93; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s16(__p0_94, __p1_94, __p2_94) __extension__ ({ \ + int16x8_t __ret_94; \ + int16x8_t __s0_94 = __p0_94; \ + int16x4_t __s1_94 = __p1_94; \ + __ret_94 = __s0_94 * splatq_lane_s16(__s1_94, __p2_94); \ + __ret_94; \ +}) +#else +#define vmulq_lane_s16(__p0_95, __p1_95, __p2_95) __extension__ ({ \ + int16x8_t __ret_95; \ + int16x8_t __s0_95 = __p0_95; \ + int16x4_t __s1_95 = __p1_95; \ + int16x8_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ + __ret_95 = __rev0_95 * __noswap_splatq_lane_s16(__rev1_95, __p2_95); \ + __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_95; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u32(__p0_96, __p1_96, __p2_96) __extension__ ({ \ + uint32x2_t __ret_96; \ + uint32x2_t __s0_96 = __p0_96; \ + uint32x2_t __s1_96 = __p1_96; \ + __ret_96 = __s0_96 * splat_lane_u32(__s1_96, __p2_96); \ + __ret_96; \ +}) +#else +#define vmul_lane_u32(__p0_97, __p1_97, __p2_97) __extension__ ({ \ + uint32x2_t __ret_97; \ + uint32x2_t __s0_97 = __p0_97; \ + uint32x2_t __s1_97 = __p1_97; \ + uint32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \ + uint32x2_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \ + __ret_97 = __rev0_97 * __noswap_splat_lane_u32(__rev1_97, __p2_97); \ + 
__ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \ + __ret_97; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u16(__p0_98, __p1_98, __p2_98) __extension__ ({ \ + uint16x4_t __ret_98; \ + uint16x4_t __s0_98 = __p0_98; \ + uint16x4_t __s1_98 = __p1_98; \ + __ret_98 = __s0_98 * splat_lane_u16(__s1_98, __p2_98); \ + __ret_98; \ +}) +#else +#define vmul_lane_u16(__p0_99, __p1_99, __p2_99) __extension__ ({ \ + uint16x4_t __ret_99; \ + uint16x4_t __s0_99 = __p0_99; \ + uint16x4_t __s1_99 = __p1_99; \ + uint16x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \ + uint16x4_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 3, 2, 1, 0); \ + __ret_99 = __rev0_99 * __noswap_splat_lane_u16(__rev1_99, __p2_99); \ + __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \ + __ret_99; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f32(__p0_100, __p1_100, __p2_100) __extension__ ({ \ + float32x2_t __ret_100; \ + float32x2_t __s0_100 = __p0_100; \ + float32x2_t __s1_100 = __p1_100; \ + __ret_100 = __s0_100 * splat_lane_f32(__s1_100, __p2_100); \ + __ret_100; \ +}) +#else +#define vmul_lane_f32(__p0_101, __p1_101, __p2_101) __extension__ ({ \ + float32x2_t __ret_101; \ + float32x2_t __s0_101 = __p0_101; \ + float32x2_t __s1_101 = __p1_101; \ + float32x2_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 1, 0); \ + float32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \ + __ret_101 = __rev0_101 * __noswap_splat_lane_f32(__rev1_101, __p2_101); \ + __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 1, 0); \ + __ret_101; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s32(__p0_102, __p1_102, __p2_102) __extension__ ({ \ + int32x2_t __ret_102; \ + int32x2_t __s0_102 = __p0_102; \ + int32x2_t __s1_102 = __p1_102; \ + __ret_102 = __s0_102 * splat_lane_s32(__s1_102, __p2_102); \ + __ret_102; \ +}) +#else +#define vmul_lane_s32(__p0_103, __p1_103, __p2_103) __extension__ ({ \ + int32x2_t __ret_103; \ + int32x2_t __s0_103 = __p0_103; \ + int32x2_t __s1_103 = __p1_103; \ + int32x2_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \ + int32x2_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \ + __ret_103 = __rev0_103 * __noswap_splat_lane_s32(__rev1_103, __p2_103); \ + __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \ + __ret_103; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s16(__p0_104, __p1_104, __p2_104) __extension__ ({ \ + int16x4_t __ret_104; \ + int16x4_t __s0_104 = __p0_104; \ + int16x4_t __s1_104 = __p1_104; \ + __ret_104 = __s0_104 * splat_lane_s16(__s1_104, __p2_104); \ + __ret_104; \ +}) +#else +#define vmul_lane_s16(__p0_105, __p1_105, __p2_105) __extension__ ({ \ + int16x4_t __ret_105; \ + int16x4_t __s0_105 = __p0_105; \ + int16x4_t __s1_105 = __p1_105; \ + int16x4_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \ + int16x4_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \ + __ret_105 = __rev0_105 * __noswap_splat_lane_s16(__rev1_105, __p2_105); \ + __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \ + __ret_105; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint32x4_t 
vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __ret; + __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __ret; + __ret = __p0 * (uint32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (uint32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __ret; + __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + __ret = __p0 * (float32x2_t) {__p1, __p1}; + 
return __ret; +} +#else +__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (float32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = __p0 * (int32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (int32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); 
+ return __ret; +} +__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vmull_lane_u32(__p0_106, __p1_106, __p2_106) __extension__ ({ \ + uint64x2_t __ret_106; \ + uint32x2_t __s0_106 = __p0_106; \ + uint32x2_t __s1_106 = __p1_106; \ + __ret_106 = vmull_u32(__s0_106, splat_lane_u32(__s1_106, __p2_106)); \ + __ret_106; \ +}) +#else +#define vmull_lane_u32(__p0_107, __p1_107, __p2_107) __extension__ ({ \ + uint64x2_t __ret_107; \ + uint32x2_t __s0_107 = __p0_107; \ + uint32x2_t __s1_107 = __p1_107; \ + uint32x2_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \ + uint32x2_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \ + __ret_107 = __noswap_vmull_u32(__rev0_107, __noswap_splat_lane_u32(__rev1_107, __p2_107)); \ + __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \ + __ret_107; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u16(__p0_108, __p1_108, __p2_108) __extension__ ({ \ + uint32x4_t __ret_108; \ + uint16x4_t __s0_108 = __p0_108; \ + uint16x4_t __s1_108 = __p1_108; \ + __ret_108 = vmull_u16(__s0_108, splat_lane_u16(__s1_108, __p2_108)); \ + __ret_108; \ +}) +#else +#define vmull_lane_u16(__p0_109, __p1_109, __p2_109) __extension__ ({ \ + uint32x4_t __ret_109; \ + uint16x4_t __s0_109 = __p0_109; \ + uint16x4_t __s1_109 = __p1_109; \ + uint16x4_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \ + uint16x4_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \ + __ret_109 = __noswap_vmull_u16(__rev0_109, __noswap_splat_lane_u16(__rev1_109, __p2_109)); \ + __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \ + __ret_109; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \ + int64x2_t __ret_110; \ + int32x2_t __s0_110 = __p0_110; \ + int32x2_t __s1_110 = __p1_110; \ + __ret_110 = vmull_s32(__s0_110, splat_lane_s32(__s1_110, __p2_110)); \ + __ret_110; \ +}) +#else +#define vmull_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \ + int64x2_t __ret_111; \ + int32x2_t __s0_111 = __p0_111; \ + int32x2_t __s1_111 = __p1_111; \ + int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ + int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ + __ret_111 = __noswap_vmull_s32(__rev0_111, __noswap_splat_lane_s32(__rev1_111, __p2_111)); \ + __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ + __ret_111; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \ + int32x4_t __ret_112; \ + int16x4_t __s0_112 = __p0_112; \ + int16x4_t __s1_112 = __p1_112; \ + __ret_112 = vmull_s16(__s0_112, splat_lane_s16(__s1_112, __p2_112)); \ + __ret_112; \ +}) +#else +#define vmull_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \ + int32x4_t __ret_113; \ + int16x4_t __s0_113 = __p0_113; \ + int16x4_t __s1_113 = __p1_113; \ + int16x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ + int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ + __ret_113 = __noswap_vmull_s16(__rev0_113, __noswap_splat_lane_s16(__rev1_113, __p2_113)); \ + __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ + __ret_113; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai 
uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai 
uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; 
+} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t 
__p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpadalq_u16(uint32x4_t 
__p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t 
vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t __ret; + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x1_t) 
__builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + 
uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqadd_s8(int8x8_t 
__p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + 
__ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ + int64x2_t __ret_114; \ + int64x2_t __s0_114 = __p0_114; \ + int32x2_t __s1_114 = __p1_114; \ + int32x2_t __s2_114 = __p2_114; \ + __ret_114 = vqdmlal_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \ + __ret_114; \ +}) +#else +#define vqdmlal_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ + int64x2_t __ret_115; \ + int64x2_t __s0_115 = __p0_115; \ + int32x2_t __s1_115 = __p1_115; \ + int32x2_t __s2_115 = __p2_115; \ + int64x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ + int32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ + int32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ + __ret_115 = __noswap_vqdmlal_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \ + __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ + __ret_115; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ + int32x4_t __ret_116; \ + int32x4_t __s0_116 = __p0_116; \ + int16x4_t __s1_116 = __p1_116; \ + int16x4_t __s2_116 = __p2_116; \ + __ret_116 = vqdmlal_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \ + __ret_116; \ +}) +#else +#define vqdmlal_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ + int32x4_t __ret_117; \ + int32x4_t __s0_117 = __p0_117; \ + int16x4_t __s1_117 = __p1_117; \ + int16x4_t __s2_117 = __p2_117; \ + int32x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ + int16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ + int16x4_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \ + __ret_117 = __noswap_vqdmlal_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \ + __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ + __ret_117; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} 
+#else +__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \ + int64x2_t __ret_118; \ + int64x2_t __s0_118 = __p0_118; \ + int32x2_t __s1_118 = __p1_118; \ + int32x2_t __s2_118 = __p2_118; \ + __ret_118 = vqdmlsl_s32(__s0_118, __s1_118, splat_lane_s32(__s2_118, __p3_118)); \ + __ret_118; \ +}) +#else +#define vqdmlsl_lane_s32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \ + int64x2_t __ret_119; \ + int64x2_t __s0_119 = __p0_119; \ + int32x2_t __s1_119 = __p1_119; \ + int32x2_t __s2_119 = __p2_119; \ + int64x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ + int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ + int32x2_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 1, 0); \ + __ret_119 = __noswap_vqdmlsl_s32(__rev0_119, __rev1_119, 
__noswap_splat_lane_s32(__rev2_119, __p3_119)); \ + __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ + __ret_119; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s16(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \ + int32x4_t __ret_120; \ + int32x4_t __s0_120 = __p0_120; \ + int16x4_t __s1_120 = __p1_120; \ + int16x4_t __s2_120 = __p2_120; \ + __ret_120 = vqdmlsl_s16(__s0_120, __s1_120, splat_lane_s16(__s2_120, __p3_120)); \ + __ret_120; \ +}) +#else +#define vqdmlsl_lane_s16(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \ + int32x4_t __ret_121; \ + int32x4_t __s0_121 = __p0_121; \ + int16x4_t __s1_121 = __p1_121; \ + int16x4_t __s2_121 = __p2_121; \ + int32x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ + int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ + int16x4_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 3, 2, 1, 0); \ + __ret_121 = __noswap_vqdmlsl_s16(__rev0_121, __rev1_121, __noswap_splat_lane_s16(__rev2_121, __p3_121)); \ + __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ + __ret_121; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t 
__noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s32(__p0_122, __p1_122, __p2_122) __extension__ ({ \ + int64x2_t __ret_122; \ + int32x2_t __s0_122 = __p0_122; \ + int32x2_t __s1_122 = __p1_122; \ + __ret_122 = vqdmull_s32(__s0_122, splat_lane_s32(__s1_122, __p2_122)); \ + __ret_122; \ +}) +#else +#define vqdmull_lane_s32(__p0_123, __p1_123, __p2_123) __extension__ ({ \ + int64x2_t __ret_123; \ + int32x2_t __s0_123 = __p0_123; \ + int32x2_t __s1_123 = __p1_123; \ + int32x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \ + int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, 
__s1_123, 1, 0); \ + __ret_123 = __noswap_vqdmull_s32(__rev0_123, __noswap_splat_lane_s32(__rev1_123, __p2_123)); \ + __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \ + __ret_123; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s16(__p0_124, __p1_124, __p2_124) __extension__ ({ \ + int32x4_t __ret_124; \ + int16x4_t __s0_124 = __p0_124; \ + int16x4_t __s1_124 = __p1_124; \ + __ret_124 = vqdmull_s16(__s0_124, splat_lane_s16(__s1_124, __p2_124)); \ + __ret_124; \ +}) +#else +#define vqdmull_lane_s16(__p0_125, __p1_125, __p2_125) __extension__ ({ \ + int32x4_t __ret_125; \ + int16x4_t __s0_125 = __p0_125; \ + int16x4_t __s1_125 = __p1_125; \ + int16x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ + int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \ + __ret_125 = __noswap_vqdmull_s16(__rev0_125, __noswap_splat_lane_s16(__rev1_125, __p2_125)); \ + __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ + __ret_125; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { + 
uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + 
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; 
+ __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) 
__builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = 
(uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t 
vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t 
__p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ 
+}) +#else +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + 
uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + 
int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, 
__ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = 
(int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = 
(uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t 
vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = 
__builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrev32q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai 
int16x8_t vrev32q_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai poly8x16_t vrev64q_p8(poly8x16_t 
__p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t 
__ret; + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) 
__builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else 
+#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = 
__p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vrsra_n_u32(__p0, __p1, __p2) 
__extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + 
int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai 
int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x4_t 
__s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) 
__builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t 
__ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define 
vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif 
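+/*
+ * vset_lane_* / vsetq_lane_*: return a copy of the input vector __p1 with
+ * lane __p2 replaced by the scalar __p0.  On big-endian targets the vector
+ * is lane-reversed with __builtin_shufflevector before and after the
+ * builtin call, so the builtin always operates on little-endian lane
+ * numbering.  Example (illustrative only, not part of the header):
+ *
+ *   int32x4_t v = vdupq_n_s32(0);    // v = {0, 0, 0, 0}
+ *   v = vsetq_lane_s32(7, v, 2);     // v = {0, 0, 7, 0}
+ */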
+ +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = 
(int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + 
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) 
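+/*
+ * The __noswap_* variants defined in the big-endian branches perform the
+ * same operation without the surrounding lane reversals.  They appear to
+ * exist so that composite intrinsics, which reverse their operands once up
+ * front, can reuse these helpers without reversing lanes a second time.
+ */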
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) 
__builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) 
__builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t 
__ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t 
__rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = 
__p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s8(__p0, __p1, __p2) 
__extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + 
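The generated macros above all follow the same pattern: on little-endian targets the builtin is called directly, while on big-endian targets the operand lanes are first reversed with `__builtin_shufflevector`, the builtin is applied, and the result is reversed back, so the intrinsic behaves identically either way. As a rough illustration of how the `vsli`/`vsra`/`vsri` shift-and-insert families defined in this header are typically called from application code, here is a minimal sketch (not part of the generated header; the `pack_nibbles` function and buffer names are hypothetical):

```
/* Illustrative sketch only -- not part of the generated arm_neon.h diff.
 * Packs the low nibble of each byte in `lo` with the low nibble of each
 * byte in `hi`, using the NEON shift-left-and-insert intrinsic. */
#include <arm_neon.h>
#include <stdint.h>

static void pack_nibbles(uint8_t dst[16], const uint8_t lo[16], const uint8_t hi[16])
{
    uint8x16_t vlo = vld1q_u8(lo);   /* load 16 bytes of low nibbles  */
    uint8x16_t vhi = vld1q_u8(hi);   /* load 16 bytes of high nibbles */
    /* vsliq_n_u8(a, b, 4): shift each lane of b left by 4 and insert it
     * into a, keeping the low 4 bits of each lane of a.
     * Result per lane: (hi << 4) | (lo & 0x0F). */
    uint8x16_t packed = vsliq_n_u8(vlo, vhi, 4);
    vst1q_u8(dst, packed);           /* store the 16 packed bytes */
}
```

On a big-endian build the same call expands to the lane-reversing variant of the macro shown above, so user code does not need to change.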
+#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = 
__p1; \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + 
uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ 
+ int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ +}) +#else +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ +}) +#else +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ +}) +#else +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \ +}) +#else +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ +}) +#else +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, 
(int8x16_t)__s1, 50); \ +}) +#else +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ +}) +#else +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ +}) +#else +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ +}) +#else +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ +}) +#else +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ +}) +#else +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ +}) +#else +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ +}) +#else +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ +}) +#else +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ +}) +#else +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ +}) +#endif + +#define vst1_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ +}) +#else +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ +}) +#else +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ +}) +#else +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ +}) +#else +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ +}) +#endif + +#define vst1_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ +}) +#else +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ +}) +#else +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ +}) +#else +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = 
__p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ +}) +#else +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ +}) +#else +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ +}) +#else +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ +}) +#else +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ +}) +#else +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ +}) +#else +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ +}) +#else +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ +}) +#else +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, 
(int8x16_t)__rev1, __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ +}) +#else +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ +}) +#else +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ +}) +#else +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ +}) +#else +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ +}) +#else +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ +}) +#endif + +#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ +}) +#else +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ +}) +#else +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ +}) +#else +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 
= __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ +}) +#else +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ +}) +#endif + +#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ +}) +#else +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) +#else +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) +#else +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) +#else +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +}) +#else +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) +#else +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) +#else +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) +#else +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) +#else +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +}) +#else +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +}) +#else +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +}) +#else +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +}) +#else +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) +#else +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) +#else +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) +#endif + +#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], 17); \ +}) +#else +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) +#else +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +}) +#else +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +}) +#else +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +}) +#endif + +#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +}) +#else +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) +#else +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) +#else +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) +#else +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) +#else +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) +#else +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + 
uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) +#else +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +}) +#else +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + 
int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +}) +#else +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +}) +#else +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +}) +#else +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) +#else +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) +#else +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) +#endif + +#define 
vst1_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) +#else +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) +#else +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +}) +#else +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +}) +#else +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +}) +#endif + +#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ +}) +#else +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +}) +#else +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +}) +#else +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) +#else +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +}) +#else +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) +#else +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) +#else +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t 
__rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +}) +#else +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ +}) +#else +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) +#else +#define 
vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ +}) +#else +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) +#else +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) +#else +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +}) +#else +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +}) +#else +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ +}) +#endif + +#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ +}) +#else +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) +#else +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) +#else +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) +#else +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +}) +#else +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) +#else +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) +#else +#define vst2q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) +#else +#define vst2q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) +#else +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +}) +#else +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +}) +#else +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +}) +#else +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) +#else +#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) +#else +#define vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) +#endif + +#define vst2_u64(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +}) +#else +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) +#else +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +}) +#else +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s32(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +}) +#else +#define vst2_s32(__p0, __p1) __extension__ ({ \ + 
int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +}) +#endif + +#define vst2_s64(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +}) +#else +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ +}) +#else +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ +}) +#else +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ +}) +#else +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ +}) +#else +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ +}) +#else +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ +}) +#else +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ +}) +#else +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ +}) +#else +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ +}) +#else +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ +}) +#else +#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ +}) +#else +#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ +}) +#else +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ +}) +#else +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ +}) +#else +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ +}) +#else +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) +#else +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = 
__p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_p16(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) +#else +#define vst3_p16(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) +#else +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) +#else +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) +#else +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) +#else +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +}) +#else +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +}) +#else +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s16(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +}) +#else +#define vst3q_s16(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) +#else +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) +#else +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) +#endif + +#define vst3_u64(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) +#else +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) +#else +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t 
__rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f32(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +}) +#else +#define vst3_f32(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +}) +#else +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +}) +#endif + +#define vst3_s64(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ +}) +#else +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ +}) +#else +#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ 
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ +}) +#else +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ +}) +#else +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ +}) +#else +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ +}) +#else +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ +}) +#else +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], 
__s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ +}) +#else +#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ +}) +#else +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ +}) +#else +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ +}) +#else +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ +}) +#else +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + 
uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ +}) +#else +#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ +}) +#else +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ +}) +#else +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ +}) +#else +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 
(int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +}) +#else +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +}) +#else +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) +#else +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +}) +#else +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + 
__builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) +#else +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +}) +#else +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s32(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ +}) +#else +#define vst4q_s32(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ +}) +#else +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) +#else +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) +#endif + 
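For reference, the vst3/vst4 family defined above performs interleaving structure stores (and, on big-endian targets, first reverses lane order with __builtin_shufflevector so the in-memory layout matches the architectural definition used by the little-endian path). A minimal usage sketch of the d-register variant, pairing vld4_u8 with the vst4_u8 definition above; the buffer and function names (pixels, count, swap_r_b_rgba) are hypothetical and the example is illustrative only, not part of this header:

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch: swap the R and B channels of an RGBA byte buffer in
 * place, 8 pixels per iteration. vld4_u8 de-interleaves the four channels
 * into val[0]..val[3]; vst4_u8 interleaves them back into RGBA order.
 * 'count' is assumed to be a multiple of 8 for brevity. */
static inline void swap_r_b_rgba(uint8_t *pixels, size_t count)
{
    for (size_t i = 0; i < count; i += 8) {
        uint8x8x4_t px = vld4_u8(pixels + 4 * i); /* val[0]=R, [1]=G, [2]=B, [3]=A */
        uint8x8_t r  = px.val[0];
        px.val[0] = px.val[2];        /* R <- B */
        px.val[2] = r;                /* B <- R */
        vst4_u8(pixels + 4 * i, px);  /* interleaving store back to RGBA */
    }
}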
+#ifdef __LITTLE_ENDIAN__ +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) +#else +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#define vst4_u64(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +}) +#else +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +}) +#else +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ +}) +#endif + +#define vst4_s64(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ +}) +#else +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ +}) +#else +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ +}) +#else +#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
(int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ +}) +#else +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ +}) +#else +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ +}) +#else +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ +}) +#else +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 
0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ +}) +#else +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ +}) +#else +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ +}) +#else +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ +}) +#else +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ +}) +#else +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ +}) +#else +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ +}) +#else +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ +}) +#else +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + 
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ +}) +#else +#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#ifdef 
__LITTLE_ENDIAN__ +__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t 
vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + 
int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) - vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) - vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) - vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) - vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) - vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t 
__ret; + __ret = vmovl_s16(__p0) - vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = 
__builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) 
__builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], 
(int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], 
(int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
__builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = 
(uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, 
uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + 
return __ret; +} +#else +__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t 
vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t 
vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#else +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__rev0, __p1, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#else +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__rev0, __p1, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#else +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#else +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t 
__s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdotq_lane_f32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ + float32x4_t __ret_126; \ + float32x4_t __s0_126 = __p0_126; \ + bfloat16x8_t __s1_126 = __p1_126; \ + bfloat16x4_t __s2_126 = __p2_126; \ +bfloat16x4_t __reint_126 = __s2_126; \ +float32x4_t __reint1_126 = splatq_lane_f32(*(float32x2_t *) &__reint_126, __p3_126); \ + __ret_126 = vbfdotq_f32(__s0_126, __s1_126, *(bfloat16x8_t *) &__reint1_126); \ + __ret_126; \ +}) +#else +#define vbfdotq_lane_f32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ + float32x4_t __ret_127; \ + float32x4_t __s0_127 = __p0_127; \ + bfloat16x8_t __s1_127 = __p1_127; \ + bfloat16x4_t __s2_127 = __p2_127; \ + float32x4_t 
__rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 3, 2, 1, 0); \ +bfloat16x4_t __reint_127 = __rev2_127; \ +float32x4_t __reint1_127 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_127, __p3_127); \ + __ret_127 = __noswap_vbfdotq_f32(__rev0_127, __rev1_127, *(bfloat16x8_t *) &__reint1_127); \ + __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \ + __ret_127; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdot_lane_f32(__p0_128, __p1_128, __p2_128, __p3_128) __extension__ ({ \ + float32x2_t __ret_128; \ + float32x2_t __s0_128 = __p0_128; \ + bfloat16x4_t __s1_128 = __p1_128; \ + bfloat16x4_t __s2_128 = __p2_128; \ +bfloat16x4_t __reint_128 = __s2_128; \ +float32x2_t __reint1_128 = splat_lane_f32(*(float32x2_t *) &__reint_128, __p3_128); \ + __ret_128 = vbfdot_f32(__s0_128, __s1_128, *(bfloat16x4_t *) &__reint1_128); \ + __ret_128; \ +}) +#else +#define vbfdot_lane_f32(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \ + float32x2_t __ret_129; \ + float32x2_t __s0_129 = __p0_129; \ + bfloat16x4_t __s1_129 = __p1_129; \ + bfloat16x4_t __s2_129 = __p2_129; \ + float32x2_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 1, 0); \ + bfloat16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, 3, 2, 1, 0); \ +bfloat16x4_t __reint_129 = __rev2_129; \ +float32x2_t __reint1_129 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_129, __p3_129); \ + __ret_129 = __noswap_vbfdot_f32(__rev0_129, __rev1_129, *(bfloat16x4_t *) &__reint1_129); \ + __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 1, 0); \ + __ret_129; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdotq_laneq_f32(__p0_130, __p1_130, __p2_130, __p3_130) __extension__ ({ \ + float32x4_t __ret_130; \ + float32x4_t __s0_130 = __p0_130; \ + bfloat16x8_t __s1_130 = __p1_130; \ + bfloat16x8_t __s2_130 = __p2_130; \ +bfloat16x8_t __reint_130 = __s2_130; \ +float32x4_t __reint1_130 = splatq_laneq_f32(*(float32x4_t *) &__reint_130, __p3_130); \ + __ret_130 = vbfdotq_f32(__s0_130, __s1_130, *(bfloat16x8_t *) &__reint1_130); \ + __ret_130; \ +}) +#else +#define vbfdotq_laneq_f32(__p0_131, __p1_131, __p2_131, __p3_131) __extension__ ({ \ + float32x4_t __ret_131; \ + float32x4_t __s0_131 = __p0_131; \ + bfloat16x8_t __s1_131 = __p1_131; \ + bfloat16x8_t __s2_131 = __p2_131; \ + float32x4_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_131; __rev2_131 = __builtin_shufflevector(__s2_131, __s2_131, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_131 = __rev2_131; \ +float32x4_t __reint1_131 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_131, __p3_131); \ + __ret_131 = __noswap_vbfdotq_f32(__rev0_131, __rev1_131, *(bfloat16x8_t *) &__reint1_131); \ + __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 3, 2, 1, 0); \ + __ret_131; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdot_laneq_f32(__p0_132, __p1_132, __p2_132, __p3_132) __extension__ ({ \ + float32x2_t __ret_132; \ + float32x2_t __s0_132 = __p0_132; \ + bfloat16x4_t __s1_132 = __p1_132; \ + 
bfloat16x8_t __s2_132 = __p2_132; \ +bfloat16x8_t __reint_132 = __s2_132; \ +float32x2_t __reint1_132 = splat_laneq_f32(*(float32x4_t *) &__reint_132, __p3_132); \ + __ret_132 = vbfdot_f32(__s0_132, __s1_132, *(bfloat16x4_t *) &__reint1_132); \ + __ret_132; \ +}) +#else +#define vbfdot_laneq_f32(__p0_133, __p1_133, __p2_133, __p3_133) __extension__ ({ \ + float32x2_t __ret_133; \ + float32x2_t __s0_133 = __p0_133; \ + bfloat16x4_t __s1_133 = __p1_133; \ + bfloat16x8_t __s2_133 = __p2_133; \ + float32x2_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 1, 0); \ + bfloat16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_133; __rev2_133 = __builtin_shufflevector(__s2_133, __s2_133, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_133 = __rev2_133; \ +float32x2_t __reint1_133 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_133, __p3_133); \ + __ret_133 = __noswap_vbfdot_f32(__rev0_133, __rev1_133, *(bfloat16x4_t *) &__reint1_133); \ + __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0); \ + __ret_133; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (bfloat16x4_t)(__promote); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_134) { + float32x4_t __ret_134; +bfloat16x4_t __reint_134 = __p0_134; +int32x4_t __reint1_134 = vshll_n_s16(*(int16x4_t *) &__reint_134, 16); + __ret_134 = *(float32x4_t *) &__reint1_134; + return __ret_134; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_135) { + float32x4_t __ret_135; + bfloat16x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__p0_135, __p0_135, 3, 2, 1, 0); +bfloat16x4_t __reint_135 = __rev0_135; +int32x4_t __reint1_135 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_135, 16); + __ret_135 = *(float32x4_t *) &__reint1_135; + __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); + return __ret_135; +} +__ai __attribute__((target("bf16"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_136) { + float32x4_t __ret_136; +bfloat16x4_t __reint_136 = __p0_136; +int32x4_t __reint1_136 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_136, 16); + __ret_136 = *(float32x4_t *) &__reint1_136; + return __ret_136; +} +#endif + +__ai __attribute__((target("bf16"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) { + float32_t __ret; +bfloat16_t __reint = __p0; +int32_t __reint1 = *(int32_t *) &__reint << 16; + __ret = *(float32_t *) &__reint1; + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) { + bfloat16_t __ret; + __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ 
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_bf16(__p0_137, __p1_137) __extension__ ({ \ + bfloat16x8_t __ret_137; \ + bfloat16x4_t __s0_137 = __p0_137; \ + __ret_137 = splatq_lane_bf16(__s0_137, __p1_137); \ + __ret_137; \ +}) +#else +#define vdupq_lane_bf16(__p0_138, __p1_138) __extension__ ({ \ + bfloat16x8_t __ret_138; \ + bfloat16x4_t __s0_138 = __p0_138; \ + bfloat16x4_t __rev0_138; __rev0_138 = __builtin_shufflevector(__s0_138, __s0_138, 3, 2, 1, 0); \ + __ret_138 = __noswap_splatq_lane_bf16(__rev0_138, __p1_138); \ + __ret_138 = __builtin_shufflevector(__ret_138, __ret_138, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_138; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_bf16(__p0_139, __p1_139) __extension__ ({ \ + bfloat16x4_t __ret_139; \ + bfloat16x4_t __s0_139 = __p0_139; \ + __ret_139 = splat_lane_bf16(__s0_139, __p1_139); \ + __ret_139; \ +}) +#else +#define vdup_lane_bf16(__p0_140, __p1_140) __extension__ ({ \ + bfloat16x4_t __ret_140; \ + bfloat16x4_t __s0_140 = __p0_140; \ + bfloat16x4_t __rev0_140; __rev0_140 = __builtin_shufflevector(__s0_140, __s0_140, 3, 2, 1, 0); \ + __ret_140 = __noswap_splat_lane_bf16(__rev0_140, __p1_140); \ + __ret_140 = __builtin_shufflevector(__ret_140, __ret_140, 3, 2, 1, 0); \ + __ret_140; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_bf16(__p0_141, __p1_141) __extension__ ({ \ + bfloat16x8_t __ret_141; \ + bfloat16x8_t __s0_141 = __p0_141; \ + __ret_141 = splatq_laneq_bf16(__s0_141, __p1_141); \ + __ret_141; \ +}) +#else +#define vdupq_laneq_bf16(__p0_142, __p1_142) __extension__ ({ \ + bfloat16x8_t __ret_142; \ + bfloat16x8_t __s0_142 = __p0_142; \ + bfloat16x8_t __rev0_142; __rev0_142 = __builtin_shufflevector(__s0_142, __s0_142, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_142 = __noswap_splatq_laneq_bf16(__rev0_142, __p1_142); \ + __ret_142 = __builtin_shufflevector(__ret_142, __ret_142, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_142; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_bf16(__p0_143, __p1_143) __extension__ ({ \ + bfloat16x4_t __ret_143; \ + bfloat16x8_t __s0_143 = __p0_143; \ + __ret_143 = splat_laneq_bf16(__s0_143, __p1_143); \ + __ret_143; \ +}) +#else +#define vdup_laneq_bf16(__p0_144, __p1_144) __extension__ ({ \ + bfloat16x4_t __ret_144; \ + bfloat16x8_t __s0_144 = __p0_144; \ + bfloat16x8_t __rev0_144; __rev0_144 = __builtin_shufflevector(__s0_144, __s0_144, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_144 = __noswap_splat_laneq_bf16(__rev0_144, __p1_144); \ + 
__ret_144 = __builtin_shufflevector(__ret_144, __ret_144, 3, 2, 1, 0); \ + __ret_144; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + 
return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ + __ret; \ +}) +#else +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ + __ret; \ +}) +#else +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ + __ret; \ +}) +#else +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif 
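The additions up to this point bring in the BFloat16 load/store, lane-splat/dup and dot-product wrappers from the generated header, each with a big-endian variant that reverses lane order around the underlying builtin. As a hedged illustration of how user code typically consumes these intrinsics — the helper name, pointer arguments and loop bounds below are hypothetical, and the sketch assumes a compiler target with the BF16 extension enabled — a minimal bf16 dot-product kernel might look like:

```c
/* Illustrative sketch only, not part of the generated header.
 * Assumes a toolchain/target with the Arm BF16 extension enabled. */
#include <arm_neon.h>

/* Accumulate a dot product of two bfloat16 streams into float32 lanes. */
static float bf16_dot(const bfloat16_t *a, const bfloat16_t *b, int n) {
    float32x4_t acc = vdupq_n_f32(0.0f);
    for (int i = 0; i + 8 <= n; i += 8) {
        bfloat16x8_t va = vld1q_bf16(a + i);   /* load 8 bf16 lanes */
        bfloat16x8_t vb = vld1q_bf16(b + i);
        acc = vbfdotq_f32(acc, va, vb);        /* 2-way bf16 dot product per f32 lane */
    }
    /* Horizontal reduction of the four float32 accumulators. */
    float32x2_t s = vadd_f32(vget_low_f32(acc), vget_high_f32(acc));
    return vget_lane_f32(vpadd_f32(s, s), 0);
}
```

On big-endian builds the `#else` paths above transparently apply the `__builtin_shufflevector` lane reversals around each builtin, so a caller written like this sketch stays endian-agnostic.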
+ +#ifdef __LITTLE_ENDIAN__ +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ + __ret; \ +}) +#else +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ + __ret; \ +}) +#else +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + 
__builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ + __ret; \ +}) +#else +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ + __ret; \ +}) +#else +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_bf16(__p0) __extension__ ({ 
\ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ + __ret; \ +}) +#else +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ + __ret; \ +}) +#else +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) 
__builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__s1, 43); \ +}) +#else +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__rev1, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__s1, 11); \ +}) +#else +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__rev1, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ +}) +#else +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ +}) +#else +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x2(__p0, __p1) __extension__ 
({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ 
+}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ +}) +#else +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ +}) +#else +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 
(int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ +}) +#else +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ +}) +#else +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ +}) +#else +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ +}) +#else +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_lane_u32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \ + uint32x4_t __ret_145; \ + uint32x4_t __s0_145 = __p0_145; \ + uint8x16_t __s1_145 = __p1_145; \ + uint8x8_t __s2_145 = __p2_145; \ +uint8x8_t __reint_145 = __s2_145; \ +uint32x4_t __reint1_145 = splatq_lane_u32(*(uint32x2_t *) &__reint_145, __p3_145); \ + __ret_145 = vdotq_u32(__s0_145, __s1_145, *(uint8x16_t *) &__reint1_145); \ + __ret_145; \ +}) +#else +#define vdotq_lane_u32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ + uint32x4_t __ret_146; \ + uint32x4_t __s0_146 = __p0_146; \ + uint8x16_t __s1_146 = __p1_146; \ + uint8x8_t __s2_146 = __p2_146; \ + uint32x4_t __rev0_146; __rev0_146 = __builtin_shufflevector(__s0_146, __s0_146, 3, 2, 1, 0); \ + uint8x16_t __rev1_146; __rev1_146 = __builtin_shufflevector(__s1_146, __s1_146, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_146; __rev2_146 = __builtin_shufflevector(__s2_146, __s2_146, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_146 = __rev2_146; \ +uint32x4_t __reint1_146 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_146, __p3_146); \ + __ret_146 = __noswap_vdotq_u32(__rev0_146, __rev1_146, *(uint8x16_t *) &__reint1_146); \ + __ret_146 = __builtin_shufflevector(__ret_146, __ret_146, 3, 2, 1, 0); \ + __ret_146; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ + int32x4_t __ret_147; \ + int32x4_t __s0_147 = __p0_147; \ + int8x16_t __s1_147 = __p1_147; \ + int8x8_t __s2_147 = __p2_147; \ +int8x8_t __reint_147 = __s2_147; \ +int32x4_t __reint1_147 = splatq_lane_s32(*(int32x2_t *) &__reint_147, __p3_147); \ + __ret_147 = vdotq_s32(__s0_147, __s1_147, *(int8x16_t *) &__reint1_147); \ + __ret_147; \ +}) +#else +#define vdotq_lane_s32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ + int32x4_t __ret_148; \ + int32x4_t __s0_148 = __p0_148; \ + int8x16_t __s1_148 = __p1_148; \ + int8x8_t __s2_148 = __p2_148; \ + int32x4_t __rev0_148; __rev0_148 = __builtin_shufflevector(__s0_148, __s0_148, 3, 2, 1, 0); \ + int8x16_t __rev1_148; __rev1_148 = __builtin_shufflevector(__s1_148, __s1_148, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_148; __rev2_148 = __builtin_shufflevector(__s2_148, __s2_148, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_148 = __rev2_148; \ +int32x4_t __reint1_148 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_148, __p3_148); \ + __ret_148 = __noswap_vdotq_s32(__rev0_148, __rev1_148, *(int8x16_t *) &__reint1_148); \ + __ret_148 = __builtin_shufflevector(__ret_148, __ret_148, 3, 2, 1, 0); \ + __ret_148; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_lane_u32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ + uint32x2_t __ret_149; \ + uint32x2_t __s0_149 = __p0_149; \ + uint8x8_t __s1_149 = __p1_149; \ + uint8x8_t __s2_149 = __p2_149; \ +uint8x8_t __reint_149 = __s2_149; \ +uint32x2_t __reint1_149 = splat_lane_u32(*(uint32x2_t *) &__reint_149, __p3_149); \ + __ret_149 = vdot_u32(__s0_149, __s1_149, *(uint8x8_t 
*) &__reint1_149); \ + __ret_149; \ +}) +#else +#define vdot_lane_u32(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \ + uint32x2_t __ret_150; \ + uint32x2_t __s0_150 = __p0_150; \ + uint8x8_t __s1_150 = __p1_150; \ + uint8x8_t __s2_150 = __p2_150; \ + uint32x2_t __rev0_150; __rev0_150 = __builtin_shufflevector(__s0_150, __s0_150, 1, 0); \ + uint8x8_t __rev1_150; __rev1_150 = __builtin_shufflevector(__s1_150, __s1_150, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_150; __rev2_150 = __builtin_shufflevector(__s2_150, __s2_150, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_150 = __rev2_150; \ +uint32x2_t __reint1_150 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150); \ + __ret_150 = __noswap_vdot_u32(__rev0_150, __rev1_150, *(uint8x8_t *) &__reint1_150); \ + __ret_150 = __builtin_shufflevector(__ret_150, __ret_150, 1, 0); \ + __ret_150; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_lane_s32(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \ + int32x2_t __ret_151; \ + int32x2_t __s0_151 = __p0_151; \ + int8x8_t __s1_151 = __p1_151; \ + int8x8_t __s2_151 = __p2_151; \ +int8x8_t __reint_151 = __s2_151; \ +int32x2_t __reint1_151 = splat_lane_s32(*(int32x2_t *) &__reint_151, __p3_151); \ + __ret_151 = vdot_s32(__s0_151, __s1_151, *(int8x8_t *) &__reint1_151); \ + __ret_151; \ +}) +#else +#define vdot_lane_s32(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \ + int32x2_t __ret_152; \ + int32x2_t __s0_152 = __p0_152; \ + int8x8_t __s1_152 = __p1_152; \ + int8x8_t __s2_152 = __p2_152; \ + int32x2_t __rev0_152; __rev0_152 = __builtin_shufflevector(__s0_152, __s0_152, 1, 0); \ + int8x8_t __rev1_152; __rev1_152 = __builtin_shufflevector(__s1_152, __s1_152, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_152; __rev2_152 = __builtin_shufflevector(__s2_152, __s2_152, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_152 = __rev2_152; \ +int32x2_t __reint1_152 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_152, __p3_152); \ + __ret_152 = __noswap_vdot_s32(__rev0_152, __rev1_152, *(int8x8_t *) &__reint1_152); \ + __ret_152 = __builtin_shufflevector(__ret_152, __ret_152, 1, 0); \ + __ret_152; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__rev0, 
(int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t 
__p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) 
__builtin_neon_vcagt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai 
__attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= 
__p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
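Every fullfp16 comparison above follows the same endianness convention as the rest of this generated header: on big-endian builds the input lanes are reversed with `__builtin_shufflevector` before the underlying builtin runs, and the result mask is reversed back afterwards, so callers see identical lane ordering either way. A minimal usage sketch — the `half_less_than` helper is ours, purely illustrative, and assumes a target with the fullfp16 extension (e.g. `-march=armv8.2-a+fp16`):

```
#include <arm_neon.h>

/* Illustrative helper (not part of the header): lane-wise "a < b" for
 * float16x4_t. Each result lane is 0xFFFF where the comparison holds and
 * 0x0000 otherwise, with the same lane ordering on little- and big-endian
 * targets because of the shuffles in the definitions above. */
static inline uint16x4_t half_less_than(float16x4_t a, float16x4_t b)
{
    return vclt_f16(a, b);
}
```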
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
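The conversion intrinsics in this region (`vcvt_f16_s16` and `vcvt_f16_u16` above, plus the `vcvt*_n_*` fixed-point forms and float-to-integer variants that follow) move small integers into half precision and back. A brief sketch under the same fullfp16 target assumption, with illustrative helper names of our own choosing:

```
#include <arm_neon.h>

/* Illustrative only: convert four signed 16-bit lanes to float16 and back.
 * vcvt_f16_s16 rounds each lane to the nearest representable half-precision
 * value; vcvt_s16_f16 converts back, rounding toward zero. */
static inline float16x4_t lanes_to_half(int16x4_t v)
{
    return vcvt_f16_s16(v);
}

static inline int16x4_t lanes_to_int(float16x4_t x)
{
    return vcvt_s16_f16(x);
}
```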
+#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__rev0, 
__p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) 
__builtin_neon_vcvt_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = 
(int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16x4_t) 
__builtin_neon_vext_f16((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vext_f16((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = vfmaq_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 
1, 0); + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = vfma_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + 
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f16(__p0_153, __p1_153, __p2_153) __extension__ ({ \ + float16x8_t __ret_153; \ + float16x8_t __s0_153 = __p0_153; \ + float16x4_t __s1_153 = __p1_153; \ + __ret_153 = __s0_153 * splatq_lane_f16(__s1_153, __p2_153); \ + __ret_153; \ +}) +#else +#define vmulq_lane_f16(__p0_154, __p1_154, __p2_154) __extension__ ({ \ + float16x8_t __ret_154; \ + float16x8_t __s0_154 = __p0_154; \ + float16x4_t __s1_154 = __p1_154; \ + float16x8_t __rev0_154; __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_154; __rev1_154 = __builtin_shufflevector(__s1_154, __s1_154, 3, 2, 1, 0); \ + __ret_154 = __rev0_154 * __noswap_splatq_lane_f16(__rev1_154, __p2_154); \ + __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_154; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f16(__p0_155, __p1_155, __p2_155) __extension__ ({ \ + float16x4_t __ret_155; \ + float16x4_t __s0_155 = __p0_155; \ + float16x4_t __s1_155 = __p1_155; \ + __ret_155 = __s0_155 * splat_lane_f16(__s1_155, __p2_155); \ + __ret_155; \ +}) +#else +#define vmul_lane_f16(__p0_156, __p1_156, __p2_156) __extension__ ({ \ + float16x4_t __ret_156; \ + float16x4_t __s0_156 = __p0_156; \ + float16x4_t __s1_156 = __p1_156; \ + float16x4_t __rev0_156; __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \ + float16x4_t __rev1_156; __rev1_156 = __builtin_shufflevector(__s1_156, __s1_156, 3, 2, 1, 0); \ + __ret_156 = __rev0_156 * __noswap_splat_lane_f16(__rev1_156, __p2_156); \ + __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \ + __ret_156; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define 
vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + 
__ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t 
vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__rev0, 
(int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vzip_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 
(int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("i8mm"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("i8mm"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_lane_s32(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \ + int32x4_t __ret_157; \ + int32x4_t __s0_157 = __p0_157; \ + uint8x16_t __s1_157 = __p1_157; \ + 
int8x8_t __s2_157 = __p2_157; \ +int8x8_t __reint_157 = __s2_157; \ + __ret_157 = vusdotq_s32(__s0_157, __s1_157, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_157, __p3_157))); \ + __ret_157; \ +}) +#else +#define vusdotq_lane_s32(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \ + int32x4_t __ret_158; \ + int32x4_t __s0_158 = __p0_158; \ + uint8x16_t __s1_158 = __p1_158; \ + int8x8_t __s2_158 = __p2_158; \ + int32x4_t __rev0_158; __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 3, 2, 1, 0); \ + uint8x16_t __rev1_158; __rev1_158 = __builtin_shufflevector(__s1_158, __s1_158, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_158; __rev2_158 = __builtin_shufflevector(__s2_158, __s2_158, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_158 = __rev2_158; \ + __ret_158 = __noswap_vusdotq_s32(__rev0_158, __rev1_158, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_158, __p3_158))); \ + __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 3, 2, 1, 0); \ + __ret_158; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_lane_s32(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \ + int32x2_t __ret_159; \ + int32x2_t __s0_159 = __p0_159; \ + uint8x8_t __s1_159 = __p1_159; \ + int8x8_t __s2_159 = __p2_159; \ +int8x8_t __reint_159 = __s2_159; \ + __ret_159 = vusdot_s32(__s0_159, __s1_159, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_159, __p3_159))); \ + __ret_159; \ +}) +#else +#define vusdot_lane_s32(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \ + int32x2_t __ret_160; \ + int32x2_t __s0_160 = __p0_160; \ + uint8x8_t __s1_160 = __p1_160; \ + int8x8_t __s2_160 = __p2_160; \ + int32x2_t __rev0_160; __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 1, 0); \ + uint8x8_t __rev1_160; __rev1_160 = __builtin_shufflevector(__s1_160, __s1_160, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_160; __rev2_160 = __builtin_shufflevector(__s2_160, __s2_160, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_160 = __rev2_160; \ + __ret_160 = __noswap_vusdot_s32(__rev0_160, __rev1_160, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_160, __p3_160))); \ + __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 1, 0); \ + __ret_160; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x4_t 
vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s32(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ + int32x4_t __ret_161; \ + int32x4_t __s0_161 = __p0_161; \ + int32x4_t __s1_161 = __p1_161; \ + int32x2_t __s2_161 = __p2_161; \ + __ret_161 = vqrdmlahq_s32(__s0_161, __s1_161, splatq_lane_s32(__s2_161, __p3_161)); \ + __ret_161; \ +}) +#else +#define vqrdmlahq_lane_s32(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ + int32x4_t __ret_162; \ + int32x4_t __s0_162 = __p0_162; \ + int32x4_t __s1_162 = __p1_162; \ + int32x2_t __s2_162 = __p2_162; \ + int32x4_t __rev0_162; __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 3, 2, 1, 0); \ + int32x4_t __rev1_162; __rev1_162 = __builtin_shufflevector(__s1_162, __s1_162, 3, 2, 1, 0); \ + int32x2_t __rev2_162; __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 1, 0); \ + __ret_162 = __noswap_vqrdmlahq_s32(__rev0_162, __rev1_162, __noswap_splatq_lane_s32(__rev2_162, __p3_162)); \ + __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 3, 2, 1, 0); \ + __ret_162; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ + int16x8_t __ret_163; \ + int16x8_t __s0_163 = __p0_163; \ + int16x8_t __s1_163 = __p1_163; \ + int16x4_t __s2_163 = __p2_163; \ + __ret_163 = vqrdmlahq_s16(__s0_163, __s1_163, splatq_lane_s16(__s2_163, __p3_163)); \ + __ret_163; \ +}) +#else +#define vqrdmlahq_lane_s16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ + int16x8_t __ret_164; \ + int16x8_t __s0_164 = __p0_164; \ + int16x8_t __s1_164 = __p1_164; \ + int16x4_t __s2_164 = __p2_164; \ + int16x8_t __rev0_164; __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_164; __rev1_164 = __builtin_shufflevector(__s1_164, __s1_164, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_164; __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \ + __ret_164 = __noswap_vqrdmlahq_s16(__rev0_164, __rev1_164, __noswap_splatq_lane_s16(__rev2_164, __p3_164)); \ + __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_164; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s32(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ + int32x2_t __ret_165; \ + int32x2_t __s0_165 = __p0_165; \ + int32x2_t __s1_165 = __p1_165; \ + int32x2_t __s2_165 = __p2_165; \ + __ret_165 = vqrdmlah_s32(__s0_165, __s1_165, splat_lane_s32(__s2_165, __p3_165)); \ + __ret_165; \ +}) +#else +#define vqrdmlah_lane_s32(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ + int32x2_t __ret_166; \ + int32x2_t __s0_166 = __p0_166; \ + int32x2_t __s1_166 = __p1_166; \ + int32x2_t __s2_166 = __p2_166; \ + int32x2_t __rev0_166; __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 1, 0); \ + int32x2_t __rev1_166; __rev1_166 = __builtin_shufflevector(__s1_166, __s1_166, 1, 0); \ + int32x2_t 
__rev2_166; __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 1, 0); \ + __ret_166 = __noswap_vqrdmlah_s32(__rev0_166, __rev1_166, __noswap_splat_lane_s32(__rev2_166, __p3_166)); \ + __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 1, 0); \ + __ret_166; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ + int16x4_t __ret_167; \ + int16x4_t __s0_167 = __p0_167; \ + int16x4_t __s1_167 = __p1_167; \ + int16x4_t __s2_167 = __p2_167; \ + __ret_167 = vqrdmlah_s16(__s0_167, __s1_167, splat_lane_s16(__s2_167, __p3_167)); \ + __ret_167; \ +}) +#else +#define vqrdmlah_lane_s16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ + int16x4_t __ret_168; \ + int16x4_t __s0_168 = __p0_168; \ + int16x4_t __s1_168 = __p1_168; \ + int16x4_t __s2_168 = __p2_168; \ + int16x4_t __rev0_168; __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \ + int16x4_t __rev1_168; __rev1_168 = __builtin_shufflevector(__s1_168, __s1_168, 3, 2, 1, 0); \ + int16x4_t __rev2_168; __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 3, 2, 1, 0); \ + __ret_168 = __noswap_vqrdmlah_s16(__rev0_168, __rev1_168, __noswap_splat_lane_s16(__rev2_168, __p3_168)); \ + __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \ + __ret_168; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) 
__builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ + int32x4_t __ret_169; \ + int32x4_t __s0_169 = __p0_169; \ + int32x4_t __s1_169 = __p1_169; \ + int32x2_t __s2_169 = __p2_169; \ + __ret_169 = vqrdmlshq_s32(__s0_169, __s1_169, splatq_lane_s32(__s2_169, __p3_169)); \ + __ret_169; \ +}) +#else +#define vqrdmlshq_lane_s32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ + int32x4_t __ret_170; \ + int32x4_t __s0_170 = __p0_170; \ + int32x4_t __s1_170 = __p1_170; \ + int32x2_t __s2_170 = __p2_170; \ + int32x4_t __rev0_170; __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 3, 2, 1, 0); \ + int32x4_t __rev1_170; __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 3, 2, 1, 0); \ + int32x2_t __rev2_170; __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \ + __ret_170 = __noswap_vqrdmlshq_s32(__rev0_170, __rev1_170, __noswap_splatq_lane_s32(__rev2_170, __p3_170)); \ + __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 3, 2, 1, 0); \ + __ret_170; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ + int16x8_t __ret_171; \ + int16x8_t __s0_171 = __p0_171; \ + 
int16x8_t __s1_171 = __p1_171; \ + int16x4_t __s2_171 = __p2_171; \ + __ret_171 = vqrdmlshq_s16(__s0_171, __s1_171, splatq_lane_s16(__s2_171, __p3_171)); \ + __ret_171; \ +}) +#else +#define vqrdmlshq_lane_s16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ + int16x8_t __ret_172; \ + int16x8_t __s0_172 = __p0_172; \ + int16x8_t __s1_172 = __p1_172; \ + int16x4_t __s2_172 = __p2_172; \ + int16x8_t __rev0_172; __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_172; __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_172; __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 3, 2, 1, 0); \ + __ret_172 = __noswap_vqrdmlshq_s16(__rev0_172, __rev1_172, __noswap_splatq_lane_s16(__rev2_172, __p3_172)); \ + __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_172; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ + int32x2_t __ret_173; \ + int32x2_t __s0_173 = __p0_173; \ + int32x2_t __s1_173 = __p1_173; \ + int32x2_t __s2_173 = __p2_173; \ + __ret_173 = vqrdmlsh_s32(__s0_173, __s1_173, splat_lane_s32(__s2_173, __p3_173)); \ + __ret_173; \ +}) +#else +#define vqrdmlsh_lane_s32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ + int32x2_t __ret_174; \ + int32x2_t __s0_174 = __p0_174; \ + int32x2_t __s1_174 = __p1_174; \ + int32x2_t __s2_174 = __p2_174; \ + int32x2_t __rev0_174; __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \ + int32x2_t __rev1_174; __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \ + int32x2_t __rev2_174; __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 1, 0); \ + __ret_174 = __noswap_vqrdmlsh_s32(__rev0_174, __rev1_174, __noswap_splat_lane_s32(__rev2_174, __p3_174)); \ + __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \ + __ret_174; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ + int16x4_t __ret_175; \ + int16x4_t __s0_175 = __p0_175; \ + int16x4_t __s1_175 = __p1_175; \ + int16x4_t __s2_175 = __p2_175; \ + __ret_175 = vqrdmlsh_s16(__s0_175, __s1_175, splat_lane_s16(__s2_175, __p3_175)); \ + __ret_175; \ +}) +#else +#define vqrdmlsh_lane_s16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ + int16x4_t __ret_176; \ + int16x4_t __s0_176 = __p0_176; \ + int16x4_t __s1_176 = __p1_176; \ + int16x4_t __s2_176 = __p2_176; \ + int16x4_t __rev0_176; __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \ + int16x4_t __rev1_176; __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \ + int16x4_t __rev2_176; __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \ + __ret_176 = __noswap_vqrdmlsh_s16(__rev0_176, __rev1_176, __noswap_splat_lane_s16(__rev2_176, __p3_176)); \ + __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \ + __ret_176; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, 
(int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ + float32x2_t __ret_177; \ + float32x2_t __s0_177 = __p0_177; \ + float32x2_t __s1_177 = __p1_177; \ + float32x2_t __s2_177 = __p2_177; \ +float32x2_t __reint_177 = __s2_177; \ +uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \ + __ret_177 = vcmla_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \ + __ret_177; \ +}) +#else +#define vcmla_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ + float32x2_t __ret_178; \ + float32x2_t __s0_178 = __p0_178; \ + float32x2_t __s1_178 = __p1_178; \ + float32x2_t __s2_178 = __p2_178; \ + float32x2_t __rev0_178; __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \ + float32x2_t __rev1_178; __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \ + float32x2_t __rev2_178; __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \ +float32x2_t __reint_178 = __rev2_178; \ +uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \ + __ret_178 = __noswap_vcmla_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \ + __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \ + __ret_178; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ + float32x4_t __ret_179; \ + float32x4_t __s0_179 = __p0_179; \ + float32x4_t __s1_179 = __p1_179; \ + float32x2_t __s2_179 = __p2_179; \ +float32x2_t __reint_179 = __s2_179; \ +uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \ + __ret_179 = vcmlaq_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \ + __ret_179; \ +}) +#else +#define vcmlaq_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ + float32x4_t __ret_180; \ + float32x4_t __s0_180 = __p0_180; \ + float32x4_t __s1_180 = __p1_180; \ + float32x2_t __s2_180 = __p2_180; \ + float32x4_t __rev0_180; __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \ + float32x4_t __rev1_180; __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \ + float32x2_t __rev2_180; __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \ 
+float32x2_t __reint_180 = __rev2_180; \ +uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \ + __ret_180 = __noswap_vcmlaq_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \ + __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \ + __ret_180; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ + float32x2_t __ret_181; \ + float32x2_t __s0_181 = __p0_181; \ + float32x2_t __s1_181 = __p1_181; \ + float32x4_t __s2_181 = __p2_181; \ +float32x4_t __reint_181 = __s2_181; \ +uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \ + __ret_181 = vcmla_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \ + __ret_181; \ +}) +#else +#define vcmla_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ + float32x2_t __ret_182; \ + float32x2_t __s0_182 = __p0_182; \ + float32x2_t __s1_182 = __p1_182; \ + float32x4_t __s2_182 = __p2_182; \ + float32x2_t __rev0_182; __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \ + float32x2_t __rev1_182; __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \ + float32x4_t __rev2_182; __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \ +float32x4_t __reint_182 = __rev2_182; \ +uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \ + __ret_182 = __noswap_vcmla_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \ + __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \ + __ret_182; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ + float32x4_t __ret_183; \ + float32x4_t __s0_183 = __p0_183; \ + float32x4_t __s1_183 = __p1_183; \ + float32x4_t __s2_183 = __p2_183; \ +float32x4_t __reint_183 = __s2_183; \ +uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \ + __ret_183 = vcmlaq_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \ + __ret_183; \ +}) +#else +#define vcmlaq_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ + float32x4_t __ret_184; \ + float32x4_t __s0_184 = __p0_184; \ + float32x4_t __s1_184 = __p1_184; \ + float32x4_t __s2_184 = __p2_184; \ + float32x4_t __rev0_184; __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \ + float32x4_t __rev1_184; __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \ + float32x4_t __rev2_184; __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \ +float32x4_t __reint_184 = __rev2_184; \ +uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \ + __ret_184 = __noswap_vcmlaq_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \ + __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \ + __ret_184; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai 
__attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ + float32x2_t __ret_185; \ + float32x2_t __s0_185 = __p0_185; \ + float32x2_t __s1_185 = __p1_185; \ + float32x2_t __s2_185 = __p2_185; \ +float32x2_t __reint_185 = __s2_185; \ +uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ + __ret_185 = vcmla_rot180_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \ + __ret_185; \ +}) +#else +#define vcmla_rot180_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ + float32x2_t __ret_186; \ + float32x2_t __s0_186 = __p0_186; \ + float32x2_t __s1_186 = __p1_186; \ + float32x2_t __s2_186 = __p2_186; \ + float32x2_t __rev0_186; __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \ + float32x2_t __rev1_186; __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \ + float32x2_t __rev2_186; __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \ +float32x2_t __reint_186 = __rev2_186; \ +uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \ + __ret_186 = __noswap_vcmla_rot180_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \ + __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \ + __ret_186; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ + float32x4_t __ret_187; \ + float32x4_t __s0_187 = __p0_187; \ + 
float32x4_t __s1_187 = __p1_187; \ + float32x2_t __s2_187 = __p2_187; \ +float32x2_t __reint_187 = __s2_187; \ +uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \ + __ret_187 = vcmlaq_rot180_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \ + __ret_187; \ +}) +#else +#define vcmlaq_rot180_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ + float32x4_t __ret_188; \ + float32x4_t __s0_188 = __p0_188; \ + float32x4_t __s1_188 = __p1_188; \ + float32x2_t __s2_188 = __p2_188; \ + float32x4_t __rev0_188; __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \ + float32x4_t __rev1_188; __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \ + float32x2_t __rev2_188; __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \ +float32x2_t __reint_188 = __rev2_188; \ +uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \ + __ret_188 = __noswap_vcmlaq_rot180_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \ + __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \ + __ret_188; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ + float32x2_t __ret_189; \ + float32x2_t __s0_189 = __p0_189; \ + float32x2_t __s1_189 = __p1_189; \ + float32x4_t __s2_189 = __p2_189; \ +float32x4_t __reint_189 = __s2_189; \ +uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ + __ret_189 = vcmla_rot180_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \ + __ret_189; \ +}) +#else +#define vcmla_rot180_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ + float32x2_t __ret_190; \ + float32x2_t __s0_190 = __p0_190; \ + float32x2_t __s1_190 = __p1_190; \ + float32x4_t __s2_190 = __p2_190; \ + float32x2_t __rev0_190; __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \ + float32x2_t __rev1_190; __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \ + float32x4_t __rev2_190; __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \ +float32x4_t __reint_190 = __rev2_190; \ +uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \ + __ret_190 = __noswap_vcmla_rot180_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \ + __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \ + __ret_190; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ + float32x4_t __ret_191; \ + float32x4_t __s0_191 = __p0_191; \ + float32x4_t __s1_191 = __p1_191; \ + float32x4_t __s2_191 = __p2_191; \ +float32x4_t __reint_191 = __s2_191; \ +uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \ + __ret_191 = vcmlaq_rot180_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \ + __ret_191; \ +}) +#else +#define vcmlaq_rot180_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ + float32x4_t __ret_192; \ + float32x4_t __s0_192 = __p0_192; \ + float32x4_t __s1_192 = __p1_192; \ + float32x4_t __s2_192 = __p2_192; \ + float32x4_t __rev0_192; __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \ + 
float32x4_t __rev1_192; __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \ + float32x4_t __rev2_192; __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \ +float32x4_t __reint_192 = __rev2_192; \ +uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \ + __ret_192 = __noswap_vcmlaq_rot180_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \ + __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \ + __ret_192; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ + float32x2_t __ret_193; \ + float32x2_t __s0_193 = __p0_193; \ + float32x2_t __s1_193 = __p1_193; \ + float32x2_t __s2_193 = __p2_193; \ +float32x2_t __reint_193 = __s2_193; \ +uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ + __ret_193 = vcmla_rot270_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \ + __ret_193; \ +}) +#else +#define vcmla_rot270_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ + 
float32x2_t __ret_194; \ + float32x2_t __s0_194 = __p0_194; \ + float32x2_t __s1_194 = __p1_194; \ + float32x2_t __s2_194 = __p2_194; \ + float32x2_t __rev0_194; __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \ + float32x2_t __rev1_194; __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \ + float32x2_t __rev2_194; __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \ +float32x2_t __reint_194 = __rev2_194; \ +uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \ + __ret_194 = __noswap_vcmla_rot270_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \ + __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \ + __ret_194; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ + float32x4_t __ret_195; \ + float32x4_t __s0_195 = __p0_195; \ + float32x4_t __s1_195 = __p1_195; \ + float32x2_t __s2_195 = __p2_195; \ +float32x2_t __reint_195 = __s2_195; \ +uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \ + __ret_195 = vcmlaq_rot270_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \ + __ret_195; \ +}) +#else +#define vcmlaq_rot270_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ + float32x4_t __ret_196; \ + float32x4_t __s0_196 = __p0_196; \ + float32x4_t __s1_196 = __p1_196; \ + float32x2_t __s2_196 = __p2_196; \ + float32x4_t __rev0_196; __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \ + float32x4_t __rev1_196; __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \ + float32x2_t __rev2_196; __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \ +float32x2_t __reint_196 = __rev2_196; \ +uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \ + __ret_196 = __noswap_vcmlaq_rot270_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \ + __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \ + __ret_196; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ + float32x2_t __ret_197; \ + float32x2_t __s0_197 = __p0_197; \ + float32x2_t __s1_197 = __p1_197; \ + float32x4_t __s2_197 = __p2_197; \ +float32x4_t __reint_197 = __s2_197; \ +uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ + __ret_197 = vcmla_rot270_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \ + __ret_197; \ +}) +#else +#define vcmla_rot270_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ + float32x2_t __ret_198; \ + float32x2_t __s0_198 = __p0_198; \ + float32x2_t __s1_198 = __p1_198; \ + float32x4_t __s2_198 = __p2_198; \ + float32x2_t __rev0_198; __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \ + float32x2_t __rev1_198; __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \ + float32x4_t __rev2_198; __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \ +float32x4_t __reint_198 = __rev2_198; \ +uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \ + __ret_198 = __noswap_vcmla_rot270_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \ + __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 
0); \ + __ret_198; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ + float32x4_t __ret_199; \ + float32x4_t __s0_199 = __p0_199; \ + float32x4_t __s1_199 = __p1_199; \ + float32x4_t __s2_199 = __p2_199; \ +float32x4_t __reint_199 = __s2_199; \ +uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \ + __ret_199 = vcmlaq_rot270_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \ + __ret_199; \ +}) +#else +#define vcmlaq_rot270_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ + float32x4_t __ret_200; \ + float32x4_t __s0_200 = __p0_200; \ + float32x4_t __s1_200 = __p1_200; \ + float32x4_t __s2_200 = __p2_200; \ + float32x4_t __rev0_200; __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \ + float32x4_t __rev1_200; __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \ + float32x4_t __rev2_200; __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \ +float32x4_t __reint_200 = __rev2_200; \ +uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \ + __ret_200 = __noswap_vcmlaq_rot270_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \ + __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \ + __ret_200; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ + float32x2_t __ret_201; \ + float32x2_t __s0_201 = __p0_201; \ + float32x2_t __s1_201 = __p1_201; \ + float32x2_t __s2_201 = __p2_201; \ +float32x2_t __reint_201 = __s2_201; \ +uint64x1_t __reint1_201 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201)}; \ + __ret_201 = vcmla_rot90_f32(__s0_201, __s1_201, *(float32x2_t *) &__reint1_201); \ + __ret_201; \ +}) +#else +#define vcmla_rot90_lane_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ + float32x2_t __ret_202; \ + float32x2_t __s0_202 = __p0_202; \ + float32x2_t __s1_202 = __p1_202; \ + float32x2_t __s2_202 = __p2_202; \ + float32x2_t __rev0_202; __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 1, 0); \ + float32x2_t __rev1_202; __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 1, 0); \ + float32x2_t __rev2_202; __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 1, 0); \ +float32x2_t __reint_202 = __rev2_202; \ +uint64x1_t __reint1_202 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_202, __p3_202)}; \ + __ret_202 = __noswap_vcmla_rot90_f32(__rev0_202, __rev1_202, *(float32x2_t *) &__reint1_202); \ + __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 1, 0); \ + __ret_202; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ + float32x4_t __ret_203; \ + float32x4_t __s0_203 = __p0_203; \ + float32x4_t __s1_203 = __p1_203; \ + float32x2_t __s2_203 = __p2_203; \ +float32x2_t __reint_203 = __s2_203; \ +uint64x2_t __reint1_203 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203), vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203)}; \ + __ret_203 = vcmlaq_rot90_f32(__s0_203, __s1_203, *(float32x4_t *) &__reint1_203); \ + __ret_203; \ +}) +#else +#define vcmlaq_rot90_lane_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ + float32x4_t __ret_204; \ + float32x4_t __s0_204 = __p0_204; \ + float32x4_t __s1_204 = __p1_204; \ + float32x2_t __s2_204 = __p2_204; \ + float32x4_t __rev0_204; __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 3, 2, 1, 0); \ + float32x4_t __rev1_204; __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 3, 2, 1, 0); \ + float32x2_t __rev2_204; __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 1, 0); \ +float32x2_t __reint_204 = __rev2_204; \ +uint64x2_t __reint1_204 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204), vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204)}; \ + __ret_204 = __noswap_vcmlaq_rot90_f32(__rev0_204, __rev1_204, *(float32x4_t *) &__reint1_204); \ + __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 3, 2, 1, 0); \ + __ret_204; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f32(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ + float32x2_t __ret_205; \ + float32x2_t __s0_205 = __p0_205; \ + float32x2_t __s1_205 = __p1_205; \ + float32x4_t __s2_205 = __p2_205; \ +float32x4_t __reint_205 = __s2_205; \ +uint64x1_t __reint1_205 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205)}; \ + __ret_205 = 
vcmla_rot90_f32(__s0_205, __s1_205, *(float32x2_t *) &__reint1_205); \ + __ret_205; \ +}) +#else +#define vcmla_rot90_laneq_f32(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ + float32x2_t __ret_206; \ + float32x2_t __s0_206 = __p0_206; \ + float32x2_t __s1_206 = __p1_206; \ + float32x4_t __s2_206 = __p2_206; \ + float32x2_t __rev0_206; __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 1, 0); \ + float32x2_t __rev1_206; __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 1, 0); \ + float32x4_t __rev2_206; __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 3, 2, 1, 0); \ +float32x4_t __reint_206 = __rev2_206; \ +uint64x1_t __reint1_206 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_206, __p3_206)}; \ + __ret_206 = __noswap_vcmla_rot90_f32(__rev0_206, __rev1_206, *(float32x2_t *) &__reint1_206); \ + __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 1, 0); \ + __ret_206; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ + float32x4_t __ret_207; \ + float32x4_t __s0_207 = __p0_207; \ + float32x4_t __s1_207 = __p1_207; \ + float32x4_t __s2_207 = __p2_207; \ +float32x4_t __reint_207 = __s2_207; \ +uint64x2_t __reint1_207 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207), vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207)}; \ + __ret_207 = vcmlaq_rot90_f32(__s0_207, __s1_207, *(float32x4_t *) &__reint1_207); \ + __ret_207; \ +}) +#else +#define vcmlaq_rot90_laneq_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ + float32x4_t __ret_208; \ + float32x4_t __s0_208 = __p0_208; \ + float32x4_t __s1_208 = __p1_208; \ + float32x4_t __s2_208 = __p2_208; \ + float32x4_t __rev0_208; __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 3, 2, 1, 0); \ + float32x4_t __rev1_208; __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 3, 2, 1, 0); \ + float32x4_t __rev2_208; __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 3, 2, 1, 0); \ +float32x4_t __reint_208 = __rev2_208; \ +uint64x2_t __reint1_208 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208)}; \ + __ret_208 = __noswap_vcmlaq_rot90_f32(__rev0_208, __rev1_208, *(float32x4_t *) &__reint1_208); \ + __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 3, 2, 1, 0); \ + __ret_208; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) 
float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return 
__ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ + float16x4_t __ret_209; \ + float16x4_t __s0_209 = __p0_209; \ + float16x4_t __s1_209 = __p1_209; \ + float16x4_t __s2_209 = __p2_209; \ +float16x4_t __reint_209 = __s2_209; \ +uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \ + __ret_209 = vcmla_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \ + __ret_209; \ +}) +#else +#define vcmla_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ + float16x4_t __ret_210; \ + float16x4_t __s0_210 = __p0_210; \ + float16x4_t __s1_210 = __p1_210; \ + float16x4_t __s2_210 = __p2_210; \ + float16x4_t __rev0_210; __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \ + float16x4_t __rev1_210; __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \ + float16x4_t __rev2_210; __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \ +float16x4_t __reint_210 = __rev2_210; \ +uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \ + __ret_210 = __noswap_vcmla_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \ + __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \ + __ret_210; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ + float16x8_t __ret_211; \ + float16x8_t __s0_211 = __p0_211; \ + float16x8_t __s1_211 = __p1_211; \ + float16x4_t __s2_211 = __p2_211; \ +float16x4_t __reint_211 = __s2_211; \ +uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \ + __ret_211 = vcmlaq_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \ + __ret_211; \ +}) +#else +#define vcmlaq_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ + float16x8_t __ret_212; \ + float16x8_t __s0_212 = __p0_212; \ + float16x8_t __s1_212 = __p1_212; \ + float16x4_t __s2_212 = __p2_212; \ + float16x8_t __rev0_212; __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_212; __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_212; __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \ 
+float16x4_t __reint_212 = __rev2_212; \ +uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \ + __ret_212 = __noswap_vcmlaq_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \ + __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_212; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ + float16x4_t __ret_213; \ + float16x4_t __s0_213 = __p0_213; \ + float16x4_t __s1_213 = __p1_213; \ + float16x8_t __s2_213 = __p2_213; \ +float16x8_t __reint_213 = __s2_213; \ +uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \ + __ret_213 = vcmla_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \ + __ret_213; \ +}) +#else +#define vcmla_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \ + float16x4_t __ret_214; \ + float16x4_t __s0_214 = __p0_214; \ + float16x4_t __s1_214 = __p1_214; \ + float16x8_t __s2_214 = __p2_214; \ + float16x4_t __rev0_214; __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \ + float16x4_t __rev1_214; __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \ + float16x8_t __rev2_214; __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_214 = __rev2_214; \ +uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \ + __ret_214 = __noswap_vcmla_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \ + __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \ + __ret_214; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \ + float16x8_t __ret_215; \ + float16x8_t __s0_215 = __p0_215; \ + float16x8_t __s1_215 = __p1_215; \ + float16x8_t __s2_215 = __p2_215; \ +float16x8_t __reint_215 = __s2_215; \ +uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \ + __ret_215 = vcmlaq_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \ + __ret_215; \ +}) +#else +#define vcmlaq_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \ + float16x8_t __ret_216; \ + float16x8_t __s0_216 = __p0_216; \ + float16x8_t __s1_216 = __p1_216; \ + float16x8_t __s2_216 = __p2_216; \ + float16x8_t __rev0_216; __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_216; __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_216; __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_216 = __rev2_216; \ +uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), 
__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \ + __ret_216 = __noswap_vcmlaq_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \ + __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_216; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \ + float16x4_t __ret_217; \ + float16x4_t __s0_217 = __p0_217; \ + float16x4_t __s1_217 = __p1_217; \ + float16x4_t __s2_217 = __p2_217; \ +float16x4_t __reint_217 = __s2_217; \ +uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \ + __ret_217 = vcmla_rot180_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \ + __ret_217; \ +}) +#else +#define vcmla_rot180_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \ + float16x4_t __ret_218; \ + float16x4_t __s0_218 = __p0_218; \ + float16x4_t __s1_218 = __p1_218; \ + float16x4_t __s2_218 = __p2_218; \ + float16x4_t 
__rev0_218; __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \ + float16x4_t __rev1_218; __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \ + float16x4_t __rev2_218; __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \ +float16x4_t __reint_218 = __rev2_218; \ +uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \ + __ret_218 = __noswap_vcmla_rot180_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \ + __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \ + __ret_218; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \ + float16x8_t __ret_219; \ + float16x8_t __s0_219 = __p0_219; \ + float16x8_t __s1_219 = __p1_219; \ + float16x4_t __s2_219 = __p2_219; \ +float16x4_t __reint_219 = __s2_219; \ +uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \ + __ret_219 = vcmlaq_rot180_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \ + __ret_219; \ +}) +#else +#define vcmlaq_rot180_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \ + float16x8_t __ret_220; \ + float16x8_t __s0_220 = __p0_220; \ + float16x8_t __s1_220 = __p1_220; \ + float16x4_t __s2_220 = __p2_220; \ + float16x8_t __rev0_220; __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_220; __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_220; __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \ +float16x4_t __reint_220 = __rev2_220; \ +uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \ + __ret_220 = __noswap_vcmlaq_rot180_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \ + __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_220; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \ + float16x4_t __ret_221; \ + float16x4_t __s0_221 = __p0_221; \ + float16x4_t __s1_221 = __p1_221; \ + float16x8_t __s2_221 = __p2_221; \ +float16x8_t __reint_221 = __s2_221; \ +uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \ + __ret_221 = vcmla_rot180_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \ + __ret_221; \ +}) +#else +#define vcmla_rot180_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \ + float16x4_t __ret_222; \ + float16x4_t __s0_222 = __p0_222; \ + float16x4_t __s1_222 = __p1_222; \ + float16x8_t __s2_222 = __p2_222; \ + float16x4_t __rev0_222; __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \ + float16x4_t __rev1_222; __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \ + float16x8_t __rev2_222; __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 
3, 2, 1, 0); \ +float16x8_t __reint_222 = __rev2_222; \ +uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \ + __ret_222 = __noswap_vcmla_rot180_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \ + __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \ + __ret_222; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \ + float16x8_t __ret_223; \ + float16x8_t __s0_223 = __p0_223; \ + float16x8_t __s1_223 = __p1_223; \ + float16x8_t __s2_223 = __p2_223; \ +float16x8_t __reint_223 = __s2_223; \ +uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \ + __ret_223 = vcmlaq_rot180_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \ + __ret_223; \ +}) +#else +#define vcmlaq_rot180_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \ + float16x8_t __ret_224; \ + float16x8_t __s0_224 = __p0_224; \ + float16x8_t __s1_224 = __p1_224; \ + float16x8_t __s2_224 = __p2_224; \ + float16x8_t __rev0_224; __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_224; __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_224; __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_224 = __rev2_224; \ +uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \ + __ret_224 = __noswap_vcmlaq_rot180_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \ + __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_224; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \ + float16x4_t __ret_225; \ + float16x4_t __s0_225 = __p0_225; \ + float16x4_t __s1_225 = __p1_225; \ + float16x4_t __s2_225 = __p2_225; \ +float16x4_t __reint_225 = __s2_225; \ +uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \ + __ret_225 = vcmla_rot270_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \ + __ret_225; \ +}) +#else +#define vcmla_rot270_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \ + float16x4_t __ret_226; \ + float16x4_t __s0_226 = __p0_226; \ + float16x4_t __s1_226 = __p1_226; \ + float16x4_t __s2_226 = __p2_226; \ + float16x4_t __rev0_226; __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \ + float16x4_t __rev1_226; __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \ + float16x4_t __rev2_226; __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \ +float16x4_t __reint_226 = __rev2_226; \ +uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \ + __ret_226 = __noswap_vcmla_rot270_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \ + __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \ + __ret_226; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \ + float16x8_t __ret_227; \ + float16x8_t __s0_227 = __p0_227; \ + float16x8_t __s1_227 = __p1_227; \ + float16x4_t __s2_227 = __p2_227; \ +float16x4_t __reint_227 = __s2_227; \ +uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \ + __ret_227 = vcmlaq_rot270_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \ + __ret_227; \ +}) +#else +#define vcmlaq_rot270_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \ + float16x8_t __ret_228; \ + float16x8_t __s0_228 = __p0_228; \ + float16x8_t __s1_228 = __p1_228; \ + float16x4_t __s2_228 = 
__p2_228; \ + float16x8_t __rev0_228; __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_228; __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_228; __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \ +float16x4_t __reint_228 = __rev2_228; \ +uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \ + __ret_228 = __noswap_vcmlaq_rot270_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \ + __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_228; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \ + float16x4_t __ret_229; \ + float16x4_t __s0_229 = __p0_229; \ + float16x4_t __s1_229 = __p1_229; \ + float16x8_t __s2_229 = __p2_229; \ +float16x8_t __reint_229 = __s2_229; \ +uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \ + __ret_229 = vcmla_rot270_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \ + __ret_229; \ +}) +#else +#define vcmla_rot270_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ + float16x4_t __ret_230; \ + float16x4_t __s0_230 = __p0_230; \ + float16x4_t __s1_230 = __p1_230; \ + float16x8_t __s2_230 = __p2_230; \ + float16x4_t __rev0_230; __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \ + float16x4_t __rev1_230; __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \ + float16x8_t __rev2_230; __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_230 = __rev2_230; \ +uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \ + __ret_230 = __noswap_vcmla_rot270_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \ + __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \ + __ret_230; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ + float16x8_t __ret_231; \ + float16x8_t __s0_231 = __p0_231; \ + float16x8_t __s1_231 = __p1_231; \ + float16x8_t __s2_231 = __p2_231; \ +float16x8_t __reint_231 = __s2_231; \ +uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \ + __ret_231 = vcmlaq_rot270_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \ + __ret_231; \ +}) +#else +#define vcmlaq_rot270_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ + float16x8_t __ret_232; \ + float16x8_t __s0_232 = __p0_232; \ + float16x8_t __s1_232 = __p1_232; \ + float16x8_t __s2_232 = __p2_232; \ + float16x8_t __rev0_232; __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_232; __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_232; 
__rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_232 = __rev2_232; \ +uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \ + __ret_232 = __noswap_vcmlaq_rot270_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \ + __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_232; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f16(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ + float16x4_t __ret_233; \ + float16x4_t __s0_233 = __p0_233; \ + float16x4_t __s1_233 = __p1_233; \ + float16x4_t __s2_233 = __p2_233; \ +float16x4_t __reint_233 = __s2_233; \ +uint32x2_t __reint1_233 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233), vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233)}; \ + 
__ret_233 = vcmla_rot90_f16(__s0_233, __s1_233, *(float16x4_t *) &__reint1_233); \ + __ret_233; \ +}) +#else +#define vcmla_rot90_lane_f16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ + float16x4_t __ret_234; \ + float16x4_t __s0_234 = __p0_234; \ + float16x4_t __s1_234 = __p1_234; \ + float16x4_t __s2_234 = __p2_234; \ + float16x4_t __rev0_234; __rev0_234 = __builtin_shufflevector(__s0_234, __s0_234, 3, 2, 1, 0); \ + float16x4_t __rev1_234; __rev1_234 = __builtin_shufflevector(__s1_234, __s1_234, 3, 2, 1, 0); \ + float16x4_t __rev2_234; __rev2_234 = __builtin_shufflevector(__s2_234, __s2_234, 3, 2, 1, 0); \ +float16x4_t __reint_234 = __rev2_234; \ +uint32x2_t __reint1_234 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234)}; \ + __ret_234 = __noswap_vcmla_rot90_f16(__rev0_234, __rev1_234, *(float16x4_t *) &__reint1_234); \ + __ret_234 = __builtin_shufflevector(__ret_234, __ret_234, 3, 2, 1, 0); \ + __ret_234; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ + float16x8_t __ret_235; \ + float16x8_t __s0_235 = __p0_235; \ + float16x8_t __s1_235 = __p1_235; \ + float16x4_t __s2_235 = __p2_235; \ +float16x4_t __reint_235 = __s2_235; \ +uint32x4_t __reint1_235 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235)}; \ + __ret_235 = vcmlaq_rot90_f16(__s0_235, __s1_235, *(float16x8_t *) &__reint1_235); \ + __ret_235; \ +}) +#else +#define vcmlaq_rot90_lane_f16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \ + float16x8_t __ret_236; \ + float16x8_t __s0_236 = __p0_236; \ + float16x8_t __s1_236 = __p1_236; \ + float16x4_t __s2_236 = __p2_236; \ + float16x8_t __rev0_236; __rev0_236 = __builtin_shufflevector(__s0_236, __s0_236, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_236; __rev1_236 = __builtin_shufflevector(__s1_236, __s1_236, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_236; __rev2_236 = __builtin_shufflevector(__s2_236, __s2_236, 3, 2, 1, 0); \ +float16x4_t __reint_236 = __rev2_236; \ +uint32x4_t __reint1_236 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236)}; \ + __ret_236 = __noswap_vcmlaq_rot90_f16(__rev0_236, __rev1_236, *(float16x8_t *) &__reint1_236); \ + __ret_236 = __builtin_shufflevector(__ret_236, __ret_236, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_236; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ + float16x4_t __ret_237; \ + float16x4_t __s0_237 = __p0_237; \ + float16x4_t __s1_237 = __p1_237; \ + float16x8_t __s2_237 = __p2_237; \ +float16x8_t __reint_237 = __s2_237; \ +uint32x2_t __reint1_237 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237), vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237)}; \ + __ret_237 = vcmla_rot90_f16(__s0_237, __s1_237, *(float16x4_t *) &__reint1_237); \ + __ret_237; \ +}) +#else +#define vcmla_rot90_laneq_f16(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ + float16x4_t __ret_238; \ + float16x4_t __s0_238 = __p0_238; \ + float16x4_t __s1_238 = 
__p1_238; \ + float16x8_t __s2_238 = __p2_238; \ + float16x4_t __rev0_238; __rev0_238 = __builtin_shufflevector(__s0_238, __s0_238, 3, 2, 1, 0); \ + float16x4_t __rev1_238; __rev1_238 = __builtin_shufflevector(__s1_238, __s1_238, 3, 2, 1, 0); \ + float16x8_t __rev2_238; __rev2_238 = __builtin_shufflevector(__s2_238, __s2_238, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_238 = __rev2_238; \ +uint32x2_t __reint1_238 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238)}; \ + __ret_238 = __noswap_vcmla_rot90_f16(__rev0_238, __rev1_238, *(float16x4_t *) &__reint1_238); \ + __ret_238 = __builtin_shufflevector(__ret_238, __ret_238, 3, 2, 1, 0); \ + __ret_238; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ + float16x8_t __ret_239; \ + float16x8_t __s0_239 = __p0_239; \ + float16x8_t __s1_239 = __p1_239; \ + float16x8_t __s2_239 = __p2_239; \ +float16x8_t __reint_239 = __s2_239; \ +uint32x4_t __reint1_239 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239)}; \ + __ret_239 = vcmlaq_rot90_f16(__s0_239, __s1_239, *(float16x8_t *) &__reint1_239); \ + __ret_239; \ +}) +#else +#define vcmlaq_rot90_laneq_f16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ + float16x8_t __ret_240; \ + float16x8_t __s0_240 = __p0_240; \ + float16x8_t __s1_240 = __p1_240; \ + float16x8_t __s2_240 = __p2_240; \ + float16x8_t __rev0_240; __rev0_240 = __builtin_shufflevector(__s0_240, __s0_240, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_240; __rev1_240 = __builtin_shufflevector(__s1_240, __s1_240, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_240; __rev2_240 = __builtin_shufflevector(__s2_240, __s2_240, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_240 = __rev2_240; \ +uint32x4_t __reint1_240 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240)}; \ + __ret_240 = __noswap_vcmlaq_rot90_f16(__rev0_240, __rev1_240, *(float16x8_t *) &__reint1_240); \ + __ret_240 = __builtin_shufflevector(__ret_240, __ret_240, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_240; \ +}) +#endif + +#if !defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0_241, __p1_241, __p2_241) __extension__ ({ \ + int32x4_t __ret_241; \ + int32x4_t __s0_241 = __p0_241; \ + int32x2_t __s1_241 = __p1_241; \ + __ret_241 = vqdmulhq_s32(__s0_241, splatq_lane_s32(__s1_241, __p2_241)); \ + __ret_241; \ +}) +#else +#define vqdmulhq_lane_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \ + int32x4_t __ret_242; \ + int32x4_t __s0_242 = __p0_242; \ + int32x2_t __s1_242 = __p1_242; \ + int32x4_t __rev0_242; __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 3, 2, 1, 0); \ + int32x2_t __rev1_242; __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \ + __ret_242 = __noswap_vqdmulhq_s32(__rev0_242, __noswap_splatq_lane_s32(__rev1_242, __p2_242)); \ + __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 3, 2, 1, 0); \ + __ret_242; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0_243, __p1_243, __p2_243) __extension__ ({ \ 
+ int16x8_t __ret_243; \ + int16x8_t __s0_243 = __p0_243; \ + int16x4_t __s1_243 = __p1_243; \ + __ret_243 = vqdmulhq_s16(__s0_243, splatq_lane_s16(__s1_243, __p2_243)); \ + __ret_243; \ +}) +#else +#define vqdmulhq_lane_s16(__p0_244, __p1_244, __p2_244) __extension__ ({ \ + int16x8_t __ret_244; \ + int16x8_t __s0_244 = __p0_244; \ + int16x4_t __s1_244 = __p1_244; \ + int16x8_t __rev0_244; __rev0_244 = __builtin_shufflevector(__s0_244, __s0_244, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_244; __rev1_244 = __builtin_shufflevector(__s1_244, __s1_244, 3, 2, 1, 0); \ + __ret_244 = __noswap_vqdmulhq_s16(__rev0_244, __noswap_splatq_lane_s16(__rev1_244, __p2_244)); \ + __ret_244 = __builtin_shufflevector(__ret_244, __ret_244, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_244; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0_245, __p1_245, __p2_245) __extension__ ({ \ + int32x2_t __ret_245; \ + int32x2_t __s0_245 = __p0_245; \ + int32x2_t __s1_245 = __p1_245; \ + __ret_245 = vqdmulh_s32(__s0_245, splat_lane_s32(__s1_245, __p2_245)); \ + __ret_245; \ +}) +#else +#define vqdmulh_lane_s32(__p0_246, __p1_246, __p2_246) __extension__ ({ \ + int32x2_t __ret_246; \ + int32x2_t __s0_246 = __p0_246; \ + int32x2_t __s1_246 = __p1_246; \ + int32x2_t __rev0_246; __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \ + int32x2_t __rev1_246; __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \ + __ret_246 = __noswap_vqdmulh_s32(__rev0_246, __noswap_splat_lane_s32(__rev1_246, __p2_246)); \ + __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \ + __ret_246; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \ + int16x4_t __ret_247; \ + int16x4_t __s0_247 = __p0_247; \ + int16x4_t __s1_247 = __p1_247; \ + __ret_247 = vqdmulh_s16(__s0_247, splat_lane_s16(__s1_247, __p2_247)); \ + __ret_247; \ +}) +#else +#define vqdmulh_lane_s16(__p0_248, __p1_248, __p2_248) __extension__ ({ \ + int16x4_t __ret_248; \ + int16x4_t __s0_248 = __p0_248; \ + int16x4_t __s1_248 = __p1_248; \ + int16x4_t __rev0_248; __rev0_248 = __builtin_shufflevector(__s0_248, __s0_248, 3, 2, 1, 0); \ + int16x4_t __rev1_248; __rev1_248 = __builtin_shufflevector(__s1_248, __s1_248, 3, 2, 1, 0); \ + __ret_248 = __noswap_vqdmulh_s16(__rev0_248, __noswap_splat_lane_s16(__rev1_248, __p2_248)); \ + __ret_248 = __builtin_shufflevector(__ret_248, __ret_248, 3, 2, 1, 0); \ + __ret_248; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0_249, __p1_249, __p2_249) __extension__ ({ \ + int32x4_t __ret_249; \ + int32x4_t __s0_249 = __p0_249; \ + int32x2_t __s1_249 = __p1_249; \ + __ret_249 = vqrdmulhq_s32(__s0_249, splatq_lane_s32(__s1_249, __p2_249)); \ + __ret_249; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0_250, __p1_250, __p2_250) __extension__ ({ \ + int32x4_t __ret_250; \ + int32x4_t __s0_250 = __p0_250; \ + int32x2_t __s1_250 = __p1_250; \ + int32x4_t __rev0_250; __rev0_250 = __builtin_shufflevector(__s0_250, __s0_250, 3, 2, 1, 0); \ + int32x2_t __rev1_250; __rev1_250 = __builtin_shufflevector(__s1_250, __s1_250, 1, 0); \ + __ret_250 = __noswap_vqrdmulhq_s32(__rev0_250, __noswap_splatq_lane_s32(__rev1_250, __p2_250)); \ + __ret_250 = __builtin_shufflevector(__ret_250, __ret_250, 3, 2, 1, 0); \ + __ret_250; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s16(__p0_251, __p1_251, __p2_251) __extension__ ({ \ + int16x8_t __ret_251; \ + int16x8_t __s0_251 = __p0_251; \ + int16x4_t __s1_251 = __p1_251; \ + 
__ret_251 = vqrdmulhq_s16(__s0_251, splatq_lane_s16(__s1_251, __p2_251)); \ + __ret_251; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0_252, __p1_252, __p2_252) __extension__ ({ \ + int16x8_t __ret_252; \ + int16x8_t __s0_252 = __p0_252; \ + int16x4_t __s1_252 = __p1_252; \ + int16x8_t __rev0_252; __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_252; __rev1_252 = __builtin_shufflevector(__s1_252, __s1_252, 3, 2, 1, 0); \ + __ret_252 = __noswap_vqrdmulhq_s16(__rev0_252, __noswap_splatq_lane_s16(__rev1_252, __p2_252)); \ + __ret_252 = __builtin_shufflevector(__ret_252, __ret_252, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_252; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0_253, __p1_253, __p2_253) __extension__ ({ \ + int32x2_t __ret_253; \ + int32x2_t __s0_253 = __p0_253; \ + int32x2_t __s1_253 = __p1_253; \ + __ret_253 = vqrdmulh_s32(__s0_253, splat_lane_s32(__s1_253, __p2_253)); \ + __ret_253; \ +}) +#else +#define vqrdmulh_lane_s32(__p0_254, __p1_254, __p2_254) __extension__ ({ \ + int32x2_t __ret_254; \ + int32x2_t __s0_254 = __p0_254; \ + int32x2_t __s1_254 = __p1_254; \ + int32x2_t __rev0_254; __rev0_254 = __builtin_shufflevector(__s0_254, __s0_254, 1, 0); \ + int32x2_t __rev1_254; __rev1_254 = __builtin_shufflevector(__s1_254, __s1_254, 1, 0); \ + __ret_254 = __noswap_vqrdmulh_s32(__rev0_254, __noswap_splat_lane_s32(__rev1_254, __p2_254)); \ + __ret_254 = __builtin_shufflevector(__ret_254, __ret_254, 1, 0); \ + __ret_254; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0_255, __p1_255, __p2_255) __extension__ ({ \ + int16x4_t __ret_255; \ + int16x4_t __s0_255 = __p0_255; \ + int16x4_t __s1_255 = __p1_255; \ + __ret_255 = vqrdmulh_s16(__s0_255, splat_lane_s16(__s1_255, __p2_255)); \ + __ret_255; \ +}) +#else +#define vqrdmulh_lane_s16(__p0_256, __p1_256, __p2_256) __extension__ ({ \ + int16x4_t __ret_256; \ + int16x4_t __s0_256 = __p0_256; \ + int16x4_t __s1_256 = __p1_256; \ + int16x4_t __rev0_256; __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 3, 2, 1, 0); \ + int16x4_t __rev1_256; __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 3, 2, 1, 0); \ + __ret_256 = __noswap_vqrdmulh_s16(__rev0_256, __noswap_splat_lane_s16(__rev1_256, __p2_256)); \ + __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 3, 2, 1, 0); \ + __ret_256; \ +}) +#endif + +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + 
poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = 
(poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + 
return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t 
vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + 
return __ret; +} +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; 
+ __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t 
vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} 
+__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai 
int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t 
__ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__rev0, 11); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = __a32_vcvt_bf16_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a32_vcvt_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = 
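+/*
+ * The bf16 conversion intrinsics above narrow float32 lanes to the
+ * bfloat16 format (1 sign, 8 exponent, 7 mantissa bits) and require the
+ * bf16 target feature (e.g. a -march=...+bf16 build). Illustrative sketch
+ * with placeholder variable names:
+ *
+ *   float32x4_t f   = vdupq_n_f32(3.14159f);
+ *   bfloat16x4_t b4 = vcvt_bf16_f32(f);        // 4 x f32 -> 4 x bf16
+ */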
(poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = 
(bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) 
bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +#endif +#if (__ARM_FP & 2) +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + 
float16x8_t __ret; \ + float16x8_t __s1 = __p1; \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s1 = __p1; \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x2(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x2(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x4(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x4(__p0) __extension__ ({ \ + 
float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __ret; \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ + __ret; \ +}) +#else +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __ret; \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __ret; \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \ + __ret; \ +}) +#else +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __ret; \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __ret; \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ + __ret; \ +}) +#else +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __ret; \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __ret; \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ + __ret; \ +}) +#else +#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __ret; \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __ret; \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ + __ret; \ +}) +#else +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __ret; \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __ret; \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ + __ret; \ +}) +#else +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __ret; \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \ +}) +#else +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \ +}) +#else +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ +}) +#else +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ +}) +#else +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ +}) +#else +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ +}) +#else +#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); 
\ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ +}) +#else +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ +}) +#else +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ +}) +#else +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ +}) +#else +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ +}) +#else +#define 
vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ +}) +#else +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ +}) +#else +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \ +}) +#else +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ +}) +#else +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ +}) +#else +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ 
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ +}) +#else +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ +}) +#else +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ +}) +#else +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ +}) +#else +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ +}) +#else +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ +}) +#else +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ +}) +#endif + +#endif +#if __ARM_ARCH >= 8 +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = 
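+/*
+ * The (__ARM_FP & 2) block that closes above supplies the half<->single
+ * conversions (vcvt_f16_f32 / vcvt_f32_f16) and the float16 vector
+ * load/store forms: vld1/vst1 (contiguous, plus _x2/_x3/_x4 multi-register
+ * variants), vld2..vld4 / vst2..vst4 (de-interleaving and interleaving by
+ * 2, 3 or 4 elements), and the _dup and _lane forms. Illustrative sketch
+ * with placeholder pointers (each must reference enough __fp16 storage):
+ *
+ *   float16x4x2_t planes = vld2_f16(src);   // even elements -> val[0], odd -> val[1]
+ *   vst1_f16(dst_even, planes.val[0]);
+ *   vst1_f16(dst_odd,  planes.val[1]);
+ */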
(uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) 
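+/*
+ * vcvta / vcvtm / vcvtn / vcvtp (Armv8 and later) convert float32 lanes to
+ * integers with an explicit rounding mode: 'a' = nearest, ties away from
+ * zero; 'm' = toward minus infinity; 'n' = nearest, ties to even; 'p' =
+ * toward plus infinity. The plain vcvt forms elsewhere in this header
+ * truncate toward zero. Illustrative sketch with placeholder names:
+ *
+ *   float32x4_t v    = vdupq_n_f32(2.5f);
+ *   int32x4_t ceil_  = vcvtpq_s32_f32(v);   // 3 in every lane
+ *   int32x4_t even_  = vcvtnq_s32_f32(v);   // 2 (ties go to even)
+ *   int32x4_t away_  = vcvtaq_s32_f32(v);   // 3 (ties go away from zero)
+ */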
__builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes"))) uint8x16_t 
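+/*
+ * The aes-target intrinsics in this block map to the Armv8 AES
+ * instructions: AESE (AddRoundKey + SubBytes + ShiftRows), AESMC
+ * (MixColumns) and their decryption counterparts AESD / AESIMC. One full
+ * forward round is therefore AESE followed by AESMC. Sketch with
+ * placeholder uint8x16_t values 'state' and 'round_key':
+ *
+ *   state = vaesmcq_u8(vaeseq_u8(state, round_key));  // one encryption round
+ *   // last round: vaeseq_u8 only, then XOR (veorq_u8) with the final round key
+ */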
vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("sha2"))) uint32_t vsha1h_u32(uint32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2); + __ret = 
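+/*
+ * The sha2-target SHA-1 intrinsics here cover the hash update:
+ * vsha1cq_u32, vsha1pq_u32 and vsha1mq_u32 perform four rounds using the
+ * choose, parity and majority functions respectively, vsha1h_u32 is the
+ * fixed rotate used to derive the next 'e' value, and vsha1su0q_u32 /
+ * vsha1su1q_u32 expand the message schedule. Sketch of one four-round
+ * step, with placeholder state 'abcd' (uint32x4_t), 'e' (uint32_t) and
+ * 'wk' (uint32x4_t, message words + round constant):
+ *
+ *   uint32_t e_next = vsha1h_u32(vgetq_lane_u32(abcd, 0));
+ *   abcd            = vsha1cq_u32(abcd, e, wk);   // 'c' form for rounds 0-19
+ */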
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else 
+__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) 
__builtin_neon_vrndn_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float32_t vrndns_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrndns_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = 
(float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) 
__builtin_neon_vrndpq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_FMA) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t 
vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); + return __ret; +} +__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vabs_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); + return __ret; +} +__ai int64x1_t vabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int64_t vabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); + return __ret; +} +__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); + return __ret; +} +__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t 
__p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0); + return __ret; +} +#else +__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0); + return __ret; +} +#else +__ai uint64_t vaddlvq_u32(uint32x4_t 
__p0) { + uint64_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0); + return __ret; +} +#else +__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0); + return __ret; +} +#else +__ai int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0); + return __ret; +} +#else +__ai int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0); + return __ret; +} +#else +__ai int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddlv_u8(uint8x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0); + return __ret; +} +#else +__ai uint16_t vaddlv_u8(uint8x8_t __p0) { + uint16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0); + return __ret; +} +#else +__ai uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0); + return __ret; +} +#else +__ai uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0); + return __ret; +} +#else +__ai int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0); + return 
__ret; +} +#else +__ai int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0); + return __ret; +} +#else +__ai int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0); + return __ret; +} +#else +__ai uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0); + return __ret; +} +#else +__ai uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0); + return __ret; +} +#else +__ai uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0); + return __ret; +} +#else +__ai uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0); + return __ret; +} +#else +__ai int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddvq_s32(int32x4_t __p0) { + int32_t __ret; + 
__ret = (int32_t) __builtin_neon_vaddvq_s32(__p0); + return __ret; +} +#else +__ai int32_t vaddvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0); + return __ret; +} +#else +__ai int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0); + return __ret; +} +#else +__ai int16_t vaddvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0); + return __ret; +} +#else +__ai uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0); + return __ret; +} +#else +__ai uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0); + return __ret; +} +#else +__ai uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddv_s8(__p0); + return __ret; +} +#else +__ai int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddv_f32(__p0); + return __ret; +} +#else +__ai float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddv_s32(__p0); + return __ret; +} +#else +__ai int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddv_s16(__p0); + return __ret; 
+} +#else +__ai int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0); + return __ret; +} +#endif + +__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); + return __ret; +} +#else +__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + poly64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; 
+} +#else +__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); + return __ret; +} +__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t 
__p1) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + 
return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) 
__builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret 
= __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64_t vceqzd_u64(uint64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); + return __ret; +} +__ai uint64_t vceqzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0); + return __ret; +} +__ai uint64_t vceqzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); + return __ret; +} +__ai uint32_t vceqzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t 
vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + 
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) 
__builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64_t vcgezd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0); + return __ret; +} +__ai uint64_t vcgezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); + return __ret; +} +__ai uint32_t vcgezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = 
(uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgtz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgtz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64_t vcgtzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0); + return __ret; +} +__ai uint64_t vcgtzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); + return __ret; +} +__ai uint32_t vcgtzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret 
= (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vclezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vclezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vclezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vclezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vclezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vclezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vclezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vclezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vclezq_s64(int64x2_t 
__p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vclezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vclezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vclezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vclez_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vclez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vclez_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vclez_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vclez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vclez_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64_t vclezd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0); + return __ret; +} +__ai uint64_t vclezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); + return __ret; +} +__ai uint32_t vclezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) 
__builtin_neon_vclezs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcltz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcltz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcltz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcltz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcltz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcltz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcltz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcltz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcltz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcltz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64_t vcltzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0); + return __ret; +} +__ai uint64_t vcltzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); + return __ret; +} +__ai uint32_t vcltzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p8(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ + poly8x16_t __ret_257; \ + poly8x16_t __s0_257 = __p0_257; \ + poly8x8_t __s2_257 = __p2_257; \ + __ret_257 = vsetq_lane_p8(vget_lane_p8(__s2_257, __p3_257), __s0_257, __p1_257); \ + __ret_257; \ +}) +#else +#define vcopyq_lane_p8(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ + poly8x16_t __ret_258; \ + poly8x16_t __s0_258 = __p0_258; \ + poly8x8_t __s2_258 = __p2_258; \ + poly8x16_t __rev0_258; __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_258; __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_258 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_258, __p3_258), __rev0_258, __p1_258); \ + __ret_258 = __builtin_shufflevector(__ret_258, __ret_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_258; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ + poly16x8_t __ret_259; \ + poly16x8_t __s0_259 = __p0_259; \ + poly16x4_t __s2_259 = __p2_259; \ + __ret_259 = vsetq_lane_p16(vget_lane_p16(__s2_259, __p3_259), __s0_259, __p1_259); \ + __ret_259; \ +}) +#else +#define vcopyq_lane_p16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ + poly16x8_t __ret_260; \ + poly16x8_t __s0_260 = __p0_260; \ + 
poly16x4_t __s2_260 = __p2_260; \ + poly16x8_t __rev0_260; __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __rev2_260; __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 3, 2, 1, 0); \ + __ret_260 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_260, __p3_260), __rev0_260, __p1_260); \ + __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_260; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u8(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ + uint8x16_t __ret_261; \ + uint8x16_t __s0_261 = __p0_261; \ + uint8x8_t __s2_261 = __p2_261; \ + __ret_261 = vsetq_lane_u8(vget_lane_u8(__s2_261, __p3_261), __s0_261, __p1_261); \ + __ret_261; \ +}) +#else +#define vcopyq_lane_u8(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ + uint8x16_t __ret_262; \ + uint8x16_t __s0_262 = __p0_262; \ + uint8x8_t __s2_262 = __p2_262; \ + uint8x16_t __rev0_262; __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_262; __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_262 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_262, __p3_262), __rev0_262, __p1_262); \ + __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_262; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ + uint32x4_t __ret_263; \ + uint32x4_t __s0_263 = __p0_263; \ + uint32x2_t __s2_263 = __p2_263; \ + __ret_263 = vsetq_lane_u32(vget_lane_u32(__s2_263, __p3_263), __s0_263, __p1_263); \ + __ret_263; \ +}) +#else +#define vcopyq_lane_u32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ + uint32x4_t __ret_264; \ + uint32x4_t __s0_264 = __p0_264; \ + uint32x2_t __s2_264 = __p2_264; \ + uint32x4_t __rev0_264; __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \ + uint32x2_t __rev2_264; __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 1, 0); \ + __ret_264 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_264, __p3_264), __rev0_264, __p1_264); \ + __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \ + __ret_264; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u64(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ + uint64x2_t __ret_265; \ + uint64x2_t __s0_265 = __p0_265; \ + uint64x1_t __s2_265 = __p2_265; \ + __ret_265 = vsetq_lane_u64(vget_lane_u64(__s2_265, __p3_265), __s0_265, __p1_265); \ + __ret_265; \ +}) +#else +#define vcopyq_lane_u64(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ + uint64x2_t __ret_266; \ + uint64x2_t __s0_266 = __p0_266; \ + uint64x1_t __s2_266 = __p2_266; \ + uint64x2_t __rev0_266; __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \ + __ret_266 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_266, __p3_266), __rev0_266, __p1_266); \ + __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \ + __ret_266; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ + uint16x8_t __ret_267; \ + uint16x8_t __s0_267 = __p0_267; \ + uint16x4_t __s2_267 = __p2_267; \ + __ret_267 = vsetq_lane_u16(vget_lane_u16(__s2_267, __p3_267), __s0_267, __p1_267); \ + __ret_267; \ +}) +#else +#define vcopyq_lane_u16(__p0_268, __p1_268, __p2_268, __p3_268) 
__extension__ ({ \ + uint16x8_t __ret_268; \ + uint16x8_t __s0_268 = __p0_268; \ + uint16x4_t __s2_268 = __p2_268; \ + uint16x8_t __rev0_268; __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_268; __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 3, 2, 1, 0); \ + __ret_268 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_268, __p3_268), __rev0_268, __p1_268); \ + __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_268; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s8(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ + int8x16_t __ret_269; \ + int8x16_t __s0_269 = __p0_269; \ + int8x8_t __s2_269 = __p2_269; \ + __ret_269 = vsetq_lane_s8(vget_lane_s8(__s2_269, __p3_269), __s0_269, __p1_269); \ + __ret_269; \ +}) +#else +#define vcopyq_lane_s8(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ + int8x16_t __ret_270; \ + int8x16_t __s0_270 = __p0_270; \ + int8x8_t __s2_270 = __p2_270; \ + int8x16_t __rev0_270; __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_270; __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_270 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_270, __p3_270), __rev0_270, __p1_270); \ + __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_270; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_f32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ + float32x4_t __ret_271; \ + float32x4_t __s0_271 = __p0_271; \ + float32x2_t __s2_271 = __p2_271; \ + __ret_271 = vsetq_lane_f32(vget_lane_f32(__s2_271, __p3_271), __s0_271, __p1_271); \ + __ret_271; \ +}) +#else +#define vcopyq_lane_f32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ + float32x4_t __ret_272; \ + float32x4_t __s0_272 = __p0_272; \ + float32x2_t __s2_272 = __p2_272; \ + float32x4_t __rev0_272; __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \ + float32x2_t __rev2_272; __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 1, 0); \ + __ret_272 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_272, __p3_272), __rev0_272, __p1_272); \ + __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \ + __ret_272; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ + int32x4_t __ret_273; \ + int32x4_t __s0_273 = __p0_273; \ + int32x2_t __s2_273 = __p2_273; \ + __ret_273 = vsetq_lane_s32(vget_lane_s32(__s2_273, __p3_273), __s0_273, __p1_273); \ + __ret_273; \ +}) +#else +#define vcopyq_lane_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ + int32x4_t __ret_274; \ + int32x4_t __s0_274 = __p0_274; \ + int32x2_t __s2_274 = __p2_274; \ + int32x4_t __rev0_274; __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 3, 2, 1, 0); \ + int32x2_t __rev2_274; __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 1, 0); \ + __ret_274 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_274, __p3_274), __rev0_274, __p1_274); \ + __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 3, 2, 1, 0); \ + __ret_274; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s64(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ + int64x2_t __ret_275; \ + int64x2_t __s0_275 = __p0_275; \ + int64x1_t __s2_275 = __p2_275; \ + 
__ret_275 = vsetq_lane_s64(vget_lane_s64(__s2_275, __p3_275), __s0_275, __p1_275); \ + __ret_275; \ +}) +#else +#define vcopyq_lane_s64(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ + int64x2_t __ret_276; \ + int64x2_t __s0_276 = __p0_276; \ + int64x1_t __s2_276 = __p2_276; \ + int64x2_t __rev0_276; __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \ + __ret_276 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_276, __p3_276), __rev0_276, __p1_276); \ + __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \ + __ret_276; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ + int16x8_t __ret_277; \ + int16x8_t __s0_277 = __p0_277; \ + int16x4_t __s2_277 = __p2_277; \ + __ret_277 = vsetq_lane_s16(vget_lane_s16(__s2_277, __p3_277), __s0_277, __p1_277); \ + __ret_277; \ +}) +#else +#define vcopyq_lane_s16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ + int16x8_t __ret_278; \ + int16x8_t __s0_278 = __p0_278; \ + int16x4_t __s2_278 = __p2_278; \ + int16x8_t __rev0_278; __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_278; __rev2_278 = __builtin_shufflevector(__s2_278, __s2_278, 3, 2, 1, 0); \ + __ret_278 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_278, __p3_278), __rev0_278, __p1_278); \ + __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_278; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p8(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ + poly8x8_t __ret_279; \ + poly8x8_t __s0_279 = __p0_279; \ + poly8x8_t __s2_279 = __p2_279; \ + __ret_279 = vset_lane_p8(vget_lane_p8(__s2_279, __p3_279), __s0_279, __p1_279); \ + __ret_279; \ +}) +#else +#define vcopy_lane_p8(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ + poly8x8_t __ret_280; \ + poly8x8_t __s0_280 = __p0_280; \ + poly8x8_t __s2_280 = __p2_280; \ + poly8x8_t __rev0_280; __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_280; __rev2_280 = __builtin_shufflevector(__s2_280, __s2_280, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_280 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_280, __p3_280), __rev0_280, __p1_280); \ + __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_280; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ + poly16x4_t __ret_281; \ + poly16x4_t __s0_281 = __p0_281; \ + poly16x4_t __s2_281 = __p2_281; \ + __ret_281 = vset_lane_p16(vget_lane_p16(__s2_281, __p3_281), __s0_281, __p1_281); \ + __ret_281; \ +}) +#else +#define vcopy_lane_p16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ + poly16x4_t __ret_282; \ + poly16x4_t __s0_282 = __p0_282; \ + poly16x4_t __s2_282 = __p2_282; \ + poly16x4_t __rev0_282; __rev0_282 = __builtin_shufflevector(__s0_282, __s0_282, 3, 2, 1, 0); \ + poly16x4_t __rev2_282; __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \ + __ret_282 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_282, __p3_282), __rev0_282, __p1_282); \ + __ret_282 = __builtin_shufflevector(__ret_282, __ret_282, 3, 2, 1, 0); \ + __ret_282; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ + uint8x8_t __ret_283; \ + uint8x8_t __s0_283 = __p0_283; \ + uint8x8_t __s2_283 = __p2_283; \ + __ret_283 = 
vset_lane_u8(vget_lane_u8(__s2_283, __p3_283), __s0_283, __p1_283); \ + __ret_283; \ +}) +#else +#define vcopy_lane_u8(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ + uint8x8_t __ret_284; \ + uint8x8_t __s0_284 = __p0_284; \ + uint8x8_t __s2_284 = __p2_284; \ + uint8x8_t __rev0_284; __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_284; __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_284 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_284, __p3_284), __rev0_284, __p1_284); \ + __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_284; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ + uint32x2_t __ret_285; \ + uint32x2_t __s0_285 = __p0_285; \ + uint32x2_t __s2_285 = __p2_285; \ + __ret_285 = vset_lane_u32(vget_lane_u32(__s2_285, __p3_285), __s0_285, __p1_285); \ + __ret_285; \ +}) +#else +#define vcopy_lane_u32(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ + uint32x2_t __ret_286; \ + uint32x2_t __s0_286 = __p0_286; \ + uint32x2_t __s2_286 = __p2_286; \ + uint32x2_t __rev0_286; __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 1, 0); \ + uint32x2_t __rev2_286; __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 1, 0); \ + __ret_286 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_286, __p3_286), __rev0_286, __p1_286); \ + __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 1, 0); \ + __ret_286; \ +}) +#endif + +#define vcopy_lane_u64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ + uint64x1_t __ret_287; \ + uint64x1_t __s0_287 = __p0_287; \ + uint64x1_t __s2_287 = __p2_287; \ + __ret_287 = vset_lane_u64(vget_lane_u64(__s2_287, __p3_287), __s0_287, __p1_287); \ + __ret_287; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ + uint16x4_t __ret_288; \ + uint16x4_t __s0_288 = __p0_288; \ + uint16x4_t __s2_288 = __p2_288; \ + __ret_288 = vset_lane_u16(vget_lane_u16(__s2_288, __p3_288), __s0_288, __p1_288); \ + __ret_288; \ +}) +#else +#define vcopy_lane_u16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ + uint16x4_t __ret_289; \ + uint16x4_t __s0_289 = __p0_289; \ + uint16x4_t __s2_289 = __p2_289; \ + uint16x4_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 3, 2, 1, 0); \ + uint16x4_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 3, 2, 1, 0); \ + __ret_289 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_289, __p3_289), __rev0_289, __p1_289); \ + __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 3, 2, 1, 0); \ + __ret_289; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s8(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ + int8x8_t __ret_290; \ + int8x8_t __s0_290 = __p0_290; \ + int8x8_t __s2_290 = __p2_290; \ + __ret_290 = vset_lane_s8(vget_lane_s8(__s2_290, __p3_290), __s0_290, __p1_290); \ + __ret_290; \ +}) +#else +#define vcopy_lane_s8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ + int8x8_t __ret_291; \ + int8x8_t __s0_291 = __p0_291; \ + int8x8_t __s2_291 = __p2_291; \ + int8x8_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291 = 
__noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_291, __p3_291), __rev0_291, __p1_291); \ + __ret_291 = __builtin_shufflevector(__ret_291, __ret_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_f32(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ + float32x2_t __ret_292; \ + float32x2_t __s0_292 = __p0_292; \ + float32x2_t __s2_292 = __p2_292; \ + __ret_292 = vset_lane_f32(vget_lane_f32(__s2_292, __p3_292), __s0_292, __p1_292); \ + __ret_292; \ +}) +#else +#define vcopy_lane_f32(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ + float32x2_t __ret_293; \ + float32x2_t __s0_293 = __p0_293; \ + float32x2_t __s2_293 = __p2_293; \ + float32x2_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 1, 0); \ + float32x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \ + __ret_293 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_293, __p3_293), __rev0_293, __p1_293); \ + __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 1, 0); \ + __ret_293; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \ + int32x2_t __ret_294; \ + int32x2_t __s0_294 = __p0_294; \ + int32x2_t __s2_294 = __p2_294; \ + __ret_294 = vset_lane_s32(vget_lane_s32(__s2_294, __p3_294), __s0_294, __p1_294); \ + __ret_294; \ +}) +#else +#define vcopy_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \ + int32x2_t __ret_295; \ + int32x2_t __s0_295 = __p0_295; \ + int32x2_t __s2_295 = __p2_295; \ + int32x2_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 1, 0); \ + int32x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \ + __ret_295 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_295, __p3_295), __rev0_295, __p1_295); \ + __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 1, 0); \ + __ret_295; \ +}) +#endif + +#define vcopy_lane_s64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \ + int64x1_t __ret_296; \ + int64x1_t __s0_296 = __p0_296; \ + int64x1_t __s2_296 = __p2_296; \ + __ret_296 = vset_lane_s64(vget_lane_s64(__s2_296, __p3_296), __s0_296, __p1_296); \ + __ret_296; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \ + int16x4_t __ret_297; \ + int16x4_t __s0_297 = __p0_297; \ + int16x4_t __s2_297 = __p2_297; \ + __ret_297 = vset_lane_s16(vget_lane_s16(__s2_297, __p3_297), __s0_297, __p1_297); \ + __ret_297; \ +}) +#else +#define vcopy_lane_s16(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \ + int16x4_t __ret_298; \ + int16x4_t __s0_298 = __p0_298; \ + int16x4_t __s2_298 = __p2_298; \ + int16x4_t __rev0_298; __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 3, 2, 1, 0); \ + int16x4_t __rev2_298; __rev2_298 = __builtin_shufflevector(__s2_298, __s2_298, 3, 2, 1, 0); \ + __ret_298 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_298, __p3_298), __rev0_298, __p1_298); \ + __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 3, 2, 1, 0); \ + __ret_298; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p8(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \ + poly8x16_t __ret_299; \ + poly8x16_t __s0_299 = __p0_299; \ + poly8x16_t __s2_299 = __p2_299; \ + __ret_299 = vsetq_lane_p8(vgetq_lane_p8(__s2_299, __p3_299), __s0_299, __p1_299); \ + __ret_299; \ +}) +#else +#define vcopyq_laneq_p8(__p0_300, __p1_300, __p2_300, __p3_300) 
__extension__ ({ \ + poly8x16_t __ret_300; \ + poly8x16_t __s0_300 = __p0_300; \ + poly8x16_t __s2_300 = __p2_300; \ + poly8x16_t __rev0_300; __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_300; __rev2_300 = __builtin_shufflevector(__s2_300, __s2_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_300 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_300, __p3_300), __rev0_300, __p1_300); \ + __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_300; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \ + poly16x8_t __ret_301; \ + poly16x8_t __s0_301 = __p0_301; \ + poly16x8_t __s2_301 = __p2_301; \ + __ret_301 = vsetq_lane_p16(vgetq_lane_p16(__s2_301, __p3_301), __s0_301, __p1_301); \ + __ret_301; \ +}) +#else +#define vcopyq_laneq_p16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \ + poly16x8_t __ret_302; \ + poly16x8_t __s0_302 = __p0_302; \ + poly16x8_t __s2_302 = __p2_302; \ + poly16x8_t __rev0_302; __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev2_302; __rev2_302 = __builtin_shufflevector(__s2_302, __s2_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_302 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_302, __p3_302), __rev0_302, __p1_302); \ + __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_302; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \ + uint8x16_t __ret_303; \ + uint8x16_t __s0_303 = __p0_303; \ + uint8x16_t __s2_303 = __p2_303; \ + __ret_303 = vsetq_lane_u8(vgetq_lane_u8(__s2_303, __p3_303), __s0_303, __p1_303); \ + __ret_303; \ +}) +#else +#define vcopyq_laneq_u8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \ + uint8x16_t __ret_304; \ + uint8x16_t __s0_304 = __p0_304; \ + uint8x16_t __s2_304 = __p2_304; \ + uint8x16_t __rev0_304; __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_304; __rev2_304 = __builtin_shufflevector(__s2_304, __s2_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_304 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_304, __p3_304), __rev0_304, __p1_304); \ + __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_304; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ + uint32x4_t __ret_305; \ + uint32x4_t __s0_305 = __p0_305; \ + uint32x4_t __s2_305 = __p2_305; \ + __ret_305 = vsetq_lane_u32(vgetq_lane_u32(__s2_305, __p3_305), __s0_305, __p1_305); \ + __ret_305; \ +}) +#else +#define vcopyq_laneq_u32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ + uint32x4_t __ret_306; \ + uint32x4_t __s0_306 = __p0_306; \ + uint32x4_t __s2_306 = __p2_306; \ + uint32x4_t __rev0_306; __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \ + uint32x4_t __rev2_306; __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 3, 2, 1, 0); \ + __ret_306 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_306, __p3_306), __rev0_306, __p1_306); \ + __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \ + __ret_306; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vcopyq_laneq_u64(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ + uint64x2_t __ret_307; \ + uint64x2_t __s0_307 = __p0_307; \ + uint64x2_t __s2_307 = __p2_307; \ + __ret_307 = vsetq_lane_u64(vgetq_lane_u64(__s2_307, __p3_307), __s0_307, __p1_307); \ + __ret_307; \ +}) +#else +#define vcopyq_laneq_u64(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ + uint64x2_t __ret_308; \ + uint64x2_t __s0_308 = __p0_308; \ + uint64x2_t __s2_308 = __p2_308; \ + uint64x2_t __rev0_308; __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \ + uint64x2_t __rev2_308; __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 1, 0); \ + __ret_308 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_308, __p3_308), __rev0_308, __p1_308); \ + __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \ + __ret_308; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ + uint16x8_t __ret_309; \ + uint16x8_t __s0_309 = __p0_309; \ + uint16x8_t __s2_309 = __p2_309; \ + __ret_309 = vsetq_lane_u16(vgetq_lane_u16(__s2_309, __p3_309), __s0_309, __p1_309); \ + __ret_309; \ +}) +#else +#define vcopyq_laneq_u16(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ + uint16x8_t __ret_310; \ + uint16x8_t __s0_310 = __p0_310; \ + uint16x8_t __s2_310 = __p2_310; \ + uint16x8_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_310 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_310, __p3_310), __rev0_310, __p1_310); \ + __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_310; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ + int8x16_t __ret_311; \ + int8x16_t __s0_311 = __p0_311; \ + int8x16_t __s2_311 = __p2_311; \ + __ret_311 = vsetq_lane_s8(vgetq_lane_s8(__s2_311, __p3_311), __s0_311, __p1_311); \ + __ret_311; \ +}) +#else +#define vcopyq_laneq_s8(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ + int8x16_t __ret_312; \ + int8x16_t __s0_312 = __p0_312; \ + int8x16_t __s2_312 = __p2_312; \ + int8x16_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_312, __p3_312), __rev0_312, __p1_312); \ + __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ + float32x4_t __ret_313; \ + float32x4_t __s0_313 = __p0_313; \ + float32x4_t __s2_313 = __p2_313; \ + __ret_313 = vsetq_lane_f32(vgetq_lane_f32(__s2_313, __p3_313), __s0_313, __p1_313); \ + __ret_313; \ +}) +#else +#define vcopyq_laneq_f32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ + float32x4_t __ret_314; \ + float32x4_t __s0_314 = __p0_314; \ + float32x4_t __s2_314 = __p2_314; \ + float32x4_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 3, 2, 1, 0); \ + float32x4_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, 
__s2_314, 3, 2, 1, 0); \ + __ret_314 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_314, __p3_314), __rev0_314, __p1_314); \ + __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 3, 2, 1, 0); \ + __ret_314; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s32(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ + int32x4_t __ret_315; \ + int32x4_t __s0_315 = __p0_315; \ + int32x4_t __s2_315 = __p2_315; \ + __ret_315 = vsetq_lane_s32(vgetq_lane_s32(__s2_315, __p3_315), __s0_315, __p1_315); \ + __ret_315; \ +}) +#else +#define vcopyq_laneq_s32(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ + int32x4_t __ret_316; \ + int32x4_t __s0_316 = __p0_316; \ + int32x4_t __s2_316 = __p2_316; \ + int32x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \ + int32x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \ + __ret_316 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_316, __p3_316), __rev0_316, __p1_316); \ + __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \ + __ret_316; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ + int64x2_t __ret_317; \ + int64x2_t __s0_317 = __p0_317; \ + int64x2_t __s2_317 = __p2_317; \ + __ret_317 = vsetq_lane_s64(vgetq_lane_s64(__s2_317, __p3_317), __s0_317, __p1_317); \ + __ret_317; \ +}) +#else +#define vcopyq_laneq_s64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ + int64x2_t __ret_318; \ + int64x2_t __s0_318 = __p0_318; \ + int64x2_t __s2_318 = __p2_318; \ + int64x2_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 1, 0); \ + int64x2_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \ + __ret_318 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_318, __p3_318), __rev0_318, __p1_318); \ + __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 1, 0); \ + __ret_318; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ + int16x8_t __ret_319; \ + int16x8_t __s0_319 = __p0_319; \ + int16x8_t __s2_319 = __p2_319; \ + __ret_319 = vsetq_lane_s16(vgetq_lane_s16(__s2_319, __p3_319), __s0_319, __p1_319); \ + __ret_319; \ +}) +#else +#define vcopyq_laneq_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ + int16x8_t __ret_320; \ + int16x8_t __s0_320 = __p0_320; \ + int16x8_t __s2_320 = __p2_320; \ + int16x8_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_320 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_320, __p3_320), __rev0_320, __p1_320); \ + __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_320; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p8(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ + poly8x8_t __ret_321; \ + poly8x8_t __s0_321 = __p0_321; \ + poly8x16_t __s2_321 = __p2_321; \ + __ret_321 = vset_lane_p8(vgetq_lane_p8(__s2_321, __p3_321), __s0_321, __p1_321); \ + __ret_321; \ +}) +#else +#define vcopy_laneq_p8(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ + poly8x8_t __ret_322; \ + poly8x8_t __s0_322 = __p0_322; \ + poly8x16_t __s2_322 = __p2_322; \ + poly8x8_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 7, 
6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_322 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_322, __p3_322), __rev0_322, __p1_322); \ + __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_322; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ + poly16x4_t __ret_323; \ + poly16x4_t __s0_323 = __p0_323; \ + poly16x8_t __s2_323 = __p2_323; \ + __ret_323 = vset_lane_p16(vgetq_lane_p16(__s2_323, __p3_323), __s0_323, __p1_323); \ + __ret_323; \ +}) +#else +#define vcopy_laneq_p16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ + poly16x4_t __ret_324; \ + poly16x4_t __s0_324 = __p0_324; \ + poly16x8_t __s2_324 = __p2_324; \ + poly16x4_t __rev0_324; __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \ + poly16x8_t __rev2_324; __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_324 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_324, __p3_324), __rev0_324, __p1_324); \ + __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \ + __ret_324; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ + uint8x8_t __ret_325; \ + uint8x8_t __s0_325 = __p0_325; \ + uint8x16_t __s2_325 = __p2_325; \ + __ret_325 = vset_lane_u8(vgetq_lane_u8(__s2_325, __p3_325), __s0_325, __p1_325); \ + __ret_325; \ +}) +#else +#define vcopy_laneq_u8(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ + uint8x8_t __ret_326; \ + uint8x8_t __s0_326 = __p0_326; \ + uint8x16_t __s2_326 = __p2_326; \ + uint8x8_t __rev0_326; __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_326; __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_326 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_326, __p3_326), __rev0_326, __p1_326); \ + __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_326; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ + uint32x2_t __ret_327; \ + uint32x2_t __s0_327 = __p0_327; \ + uint32x4_t __s2_327 = __p2_327; \ + __ret_327 = vset_lane_u32(vgetq_lane_u32(__s2_327, __p3_327), __s0_327, __p1_327); \ + __ret_327; \ +}) +#else +#define vcopy_laneq_u32(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ + uint32x2_t __ret_328; \ + uint32x2_t __s0_328 = __p0_328; \ + uint32x4_t __s2_328 = __p2_328; \ + uint32x2_t __rev0_328; __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 1, 0); \ + uint32x4_t __rev2_328; __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \ + __ret_328 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_328, __p3_328), __rev0_328, __p1_328); \ + __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 1, 0); \ + __ret_328; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u64(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ + uint64x1_t __ret_329; \ + uint64x1_t __s0_329 = __p0_329; \ + uint64x2_t __s2_329 = __p2_329; \ + __ret_329 = vset_lane_u64(vgetq_lane_u64(__s2_329, __p3_329), __s0_329, __p1_329); \ + __ret_329; \ +}) +#else +#define vcopy_laneq_u64(__p0_330, __p1_330, __p2_330, 
__p3_330) __extension__ ({ \ + uint64x1_t __ret_330; \ + uint64x1_t __s0_330 = __p0_330; \ + uint64x2_t __s2_330 = __p2_330; \ + uint64x2_t __rev2_330; __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 1, 0); \ + __ret_330 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_330, __p3_330), __s0_330, __p1_330); \ + __ret_330; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ + uint16x4_t __ret_331; \ + uint16x4_t __s0_331 = __p0_331; \ + uint16x8_t __s2_331 = __p2_331; \ + __ret_331 = vset_lane_u16(vgetq_lane_u16(__s2_331, __p3_331), __s0_331, __p1_331); \ + __ret_331; \ +}) +#else +#define vcopy_laneq_u16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ + uint16x4_t __ret_332; \ + uint16x4_t __s0_332 = __p0_332; \ + uint16x8_t __s2_332 = __p2_332; \ + uint16x4_t __rev0_332; __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \ + uint16x8_t __rev2_332; __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_332 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_332, __p3_332), __rev0_332, __p1_332); \ + __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \ + __ret_332; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ + int8x8_t __ret_333; \ + int8x8_t __s0_333 = __p0_333; \ + int8x16_t __s2_333 = __p2_333; \ + __ret_333 = vset_lane_s8(vgetq_lane_s8(__s2_333, __p3_333), __s0_333, __p1_333); \ + __ret_333; \ +}) +#else +#define vcopy_laneq_s8(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ + int8x8_t __ret_334; \ + int8x8_t __s0_334 = __p0_334; \ + int8x16_t __s2_334 = __p2_334; \ + int8x8_t __rev0_334; __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_334; __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_334 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_334, __p3_334), __rev0_334, __p1_334); \ + __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_334; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ + float32x2_t __ret_335; \ + float32x2_t __s0_335 = __p0_335; \ + float32x4_t __s2_335 = __p2_335; \ + __ret_335 = vset_lane_f32(vgetq_lane_f32(__s2_335, __p3_335), __s0_335, __p1_335); \ + __ret_335; \ +}) +#else +#define vcopy_laneq_f32(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ + float32x2_t __ret_336; \ + float32x2_t __s0_336 = __p0_336; \ + float32x4_t __s2_336 = __p2_336; \ + float32x2_t __rev0_336; __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 1, 0); \ + float32x4_t __rev2_336; __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 3, 2, 1, 0); \ + __ret_336 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_336, __p3_336), __rev0_336, __p1_336); \ + __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 1, 0); \ + __ret_336; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ + int32x2_t __ret_337; \ + int32x2_t __s0_337 = __p0_337; \ + int32x4_t __s2_337 = __p2_337; \ + __ret_337 = vset_lane_s32(vgetq_lane_s32(__s2_337, __p3_337), __s0_337, __p1_337); \ + __ret_337; \ +}) +#else +#define vcopy_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ + int32x2_t __ret_338; 
\ + int32x2_t __s0_338 = __p0_338; \ + int32x4_t __s2_338 = __p2_338; \ + int32x2_t __rev0_338; __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \ + int32x4_t __rev2_338; __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \ + __ret_338 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_338, __p3_338), __rev0_338, __p1_338); \ + __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \ + __ret_338; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s64(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ + int64x1_t __ret_339; \ + int64x1_t __s0_339 = __p0_339; \ + int64x2_t __s2_339 = __p2_339; \ + __ret_339 = vset_lane_s64(vgetq_lane_s64(__s2_339, __p3_339), __s0_339, __p1_339); \ + __ret_339; \ +}) +#else +#define vcopy_laneq_s64(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ + int64x1_t __ret_340; \ + int64x1_t __s0_340 = __p0_340; \ + int64x2_t __s2_340 = __p2_340; \ + int64x2_t __rev2_340; __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 1, 0); \ + __ret_340 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_340, __p3_340), __s0_340, __p1_340); \ + __ret_340; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s16(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ + int16x4_t __ret_341; \ + int16x4_t __s0_341 = __p0_341; \ + int16x8_t __s2_341 = __p2_341; \ + __ret_341 = vset_lane_s16(vgetq_lane_s16(__s2_341, __p3_341), __s0_341, __p1_341); \ + __ret_341; \ +}) +#else +#define vcopy_laneq_s16(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ + int16x4_t __ret_342; \ + int16x4_t __s0_342 = __p0_342; \ + int16x8_t __s2_342 = __p2_342; \ + int16x4_t __rev0_342; __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 3, 2, 1, 0); \ + int16x8_t __rev2_342; __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_342 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_342, __p3_342), __rev0_342, __p1_342); \ + __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 3, 2, 1, 0); \ + __ret_342; \ +}) +#endif + +#define vcreate_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float64x1_t)(__promote); \ + __ret; \ +}) +__ai float32_t vcvts_f32_s32(int32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); + return __ret; +} +__ai float32_t vcvts_f32_u32(uint32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} +#endif + +__ai float64_t vcvtd_f64_s64(int64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); + return __ret; +} +__ai float64_t vcvtd_f64_u64(uint64_t __p0) { + 
float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x8_t __ret; + __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); + return __ret; +} +#else +__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x8_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_f16(vget_high_f16(__p0)); + return __ret; +} +#else +__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); + return __ret; +} +#else +__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = vcvt_f64_f32(vget_high_f32(__p0)); + return __ret; +} +#else +__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ + __ret; \ +}) +#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ + __ret; \ +}) +#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ + __ret; \ +}) +#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + float32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + 
int64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + float64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ + __ret; \ +}) +#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + float32_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + float64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ + __ret; \ +}) +__ai int32_t vcvts_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); + return __ret; +} +__ai int64_t vcvtd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai uint32_t vcvts_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); + return __ret; +} +__ai uint64_t vcvtd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t 
vcvt_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai int32_t vcvtas_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int64_t vcvtad_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtas_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai uint64_t vcvtad_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); + return __ret; +} +__ai int32_t vcvtms_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int64_t vcvtmd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtms_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); + return __ret; +} +__ai int32_t vcvtns_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int64_t vcvtnd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtns_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai uint64_t vcvtnd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); + return __ret; +} +__ai int32_t vcvtps_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int64_t vcvtpd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtps_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t 
vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); + return __ret; +} +__ai float32_t vcvtxd_f32_f64(float64_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); + return __ret; +} +#else +__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = 
(float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdup_lane_p64(__p0_343, __p1_343) __extension__ ({ \ + poly64x1_t __ret_343; \ + poly64x1_t __s0_343 = __p0_343; \ + __ret_343 = splat_lane_p64(__s0_343, __p1_343); \ + __ret_343; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p64(__p0_344, __p1_344) __extension__ ({ \ + poly64x2_t __ret_344; \ + poly64x1_t __s0_344 = __p0_344; \ + __ret_344 = splatq_lane_p64(__s0_344, __p1_344); \ + __ret_344; \ +}) +#else +#define vdupq_lane_p64(__p0_345, __p1_345) __extension__ ({ \ + poly64x2_t __ret_345; \ + poly64x1_t __s0_345 = __p0_345; \ + __ret_345 = __noswap_splatq_lane_p64(__s0_345, __p1_345); \ + __ret_345 = __builtin_shufflevector(__ret_345, __ret_345, 1, 0); \ + __ret_345; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f64(__p0_346, __p1_346) __extension__ ({ \ + float64x2_t __ret_346; \ + float64x1_t __s0_346 = __p0_346; \ + __ret_346 = splatq_lane_f64(__s0_346, __p1_346); \ + __ret_346; \ +}) +#else +#define vdupq_lane_f64(__p0_347, __p1_347) __extension__ ({ \ + float64x2_t __ret_347; \ + float64x1_t __s0_347 = __p0_347; \ + __ret_347 = __noswap_splatq_lane_f64(__s0_347, __p1_347); \ + __ret_347 = __builtin_shufflevector(__ret_347, __ret_347, 1, 0); \ + __ret_347; \ +}) +#endif + +#define vdup_lane_f64(__p0_348, __p1_348) __extension__ ({ \ + float64x1_t __ret_348; \ + float64x1_t __s0_348 = __p0_348; \ + __ret_348 = splat_lane_f64(__s0_348, __p1_348); \ + __ret_348; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 
= __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t 
__s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p8(__p0_349, __p1_349) __extension__ ({ \ + poly8x8_t __ret_349; \ + poly8x16_t __s0_349 = __p0_349; \ + __ret_349 = splat_laneq_p8(__s0_349, __p1_349); \ + __ret_349; \ +}) +#else +#define vdup_laneq_p8(__p0_350, __p1_350) __extension__ ({ \ + poly8x8_t __ret_350; \ + poly8x16_t __s0_350 = __p0_350; \ + poly8x16_t __rev0_350; __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_350 = __noswap_splat_laneq_p8(__rev0_350, __p1_350); \ + __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_350; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p64(__p0_351, __p1_351) __extension__ ({ \ + poly64x1_t __ret_351; \ + poly64x2_t __s0_351 = __p0_351; \ + __ret_351 = splat_laneq_p64(__s0_351, __p1_351); \ + __ret_351; \ +}) +#else +#define vdup_laneq_p64(__p0_352, __p1_352) __extension__ ({ \ + poly64x1_t __ret_352; \ + poly64x2_t __s0_352 = __p0_352; \ + poly64x2_t __rev0_352; __rev0_352 = 
__builtin_shufflevector(__s0_352, __s0_352, 1, 0); \ + __ret_352 = __noswap_splat_laneq_p64(__rev0_352, __p1_352); \ + __ret_352; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p16(__p0_353, __p1_353) __extension__ ({ \ + poly16x4_t __ret_353; \ + poly16x8_t __s0_353 = __p0_353; \ + __ret_353 = splat_laneq_p16(__s0_353, __p1_353); \ + __ret_353; \ +}) +#else +#define vdup_laneq_p16(__p0_354, __p1_354) __extension__ ({ \ + poly16x4_t __ret_354; \ + poly16x8_t __s0_354 = __p0_354; \ + poly16x8_t __rev0_354; __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_354 = __noswap_splat_laneq_p16(__rev0_354, __p1_354); \ + __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 3, 2, 1, 0); \ + __ret_354; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p8(__p0_355, __p1_355) __extension__ ({ \ + poly8x16_t __ret_355; \ + poly8x16_t __s0_355 = __p0_355; \ + __ret_355 = splatq_laneq_p8(__s0_355, __p1_355); \ + __ret_355; \ +}) +#else +#define vdupq_laneq_p8(__p0_356, __p1_356) __extension__ ({ \ + poly8x16_t __ret_356; \ + poly8x16_t __s0_356 = __p0_356; \ + poly8x16_t __rev0_356; __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_356 = __noswap_splatq_laneq_p8(__rev0_356, __p1_356); \ + __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_356; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p64(__p0_357, __p1_357) __extension__ ({ \ + poly64x2_t __ret_357; \ + poly64x2_t __s0_357 = __p0_357; \ + __ret_357 = splatq_laneq_p64(__s0_357, __p1_357); \ + __ret_357; \ +}) +#else +#define vdupq_laneq_p64(__p0_358, __p1_358) __extension__ ({ \ + poly64x2_t __ret_358; \ + poly64x2_t __s0_358 = __p0_358; \ + poly64x2_t __rev0_358; __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 1, 0); \ + __ret_358 = __noswap_splatq_laneq_p64(__rev0_358, __p1_358); \ + __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 1, 0); \ + __ret_358; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p16(__p0_359, __p1_359) __extension__ ({ \ + poly16x8_t __ret_359; \ + poly16x8_t __s0_359 = __p0_359; \ + __ret_359 = splatq_laneq_p16(__s0_359, __p1_359); \ + __ret_359; \ +}) +#else +#define vdupq_laneq_p16(__p0_360, __p1_360) __extension__ ({ \ + poly16x8_t __ret_360; \ + poly16x8_t __s0_360 = __p0_360; \ + poly16x8_t __rev0_360; __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_360 = __noswap_splatq_laneq_p16(__rev0_360, __p1_360); \ + __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_360; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u8(__p0_361, __p1_361) __extension__ ({ \ + uint8x16_t __ret_361; \ + uint8x16_t __s0_361 = __p0_361; \ + __ret_361 = splatq_laneq_u8(__s0_361, __p1_361); \ + __ret_361; \ +}) +#else +#define vdupq_laneq_u8(__p0_362, __p1_362) __extension__ ({ \ + uint8x16_t __ret_362; \ + uint8x16_t __s0_362 = __p0_362; \ + uint8x16_t __rev0_362; __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_362 = __noswap_splatq_laneq_u8(__rev0_362, __p1_362); \ + __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_362; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u32(__p0_363, __p1_363) __extension__ ({ \ + uint32x4_t 
__ret_363; \ + uint32x4_t __s0_363 = __p0_363; \ + __ret_363 = splatq_laneq_u32(__s0_363, __p1_363); \ + __ret_363; \ +}) +#else +#define vdupq_laneq_u32(__p0_364, __p1_364) __extension__ ({ \ + uint32x4_t __ret_364; \ + uint32x4_t __s0_364 = __p0_364; \ + uint32x4_t __rev0_364; __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \ + __ret_364 = __noswap_splatq_laneq_u32(__rev0_364, __p1_364); \ + __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \ + __ret_364; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u64(__p0_365, __p1_365) __extension__ ({ \ + uint64x2_t __ret_365; \ + uint64x2_t __s0_365 = __p0_365; \ + __ret_365 = splatq_laneq_u64(__s0_365, __p1_365); \ + __ret_365; \ +}) +#else +#define vdupq_laneq_u64(__p0_366, __p1_366) __extension__ ({ \ + uint64x2_t __ret_366; \ + uint64x2_t __s0_366 = __p0_366; \ + uint64x2_t __rev0_366; __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \ + __ret_366 = __noswap_splatq_laneq_u64(__rev0_366, __p1_366); \ + __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ + __ret_366; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u16(__p0_367, __p1_367) __extension__ ({ \ + uint16x8_t __ret_367; \ + uint16x8_t __s0_367 = __p0_367; \ + __ret_367 = splatq_laneq_u16(__s0_367, __p1_367); \ + __ret_367; \ +}) +#else +#define vdupq_laneq_u16(__p0_368, __p1_368) __extension__ ({ \ + uint16x8_t __ret_368; \ + uint16x8_t __s0_368 = __p0_368; \ + uint16x8_t __rev0_368; __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_368 = __noswap_splatq_laneq_u16(__rev0_368, __p1_368); \ + __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_368; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s8(__p0_369, __p1_369) __extension__ ({ \ + int8x16_t __ret_369; \ + int8x16_t __s0_369 = __p0_369; \ + __ret_369 = splatq_laneq_s8(__s0_369, __p1_369); \ + __ret_369; \ +}) +#else +#define vdupq_laneq_s8(__p0_370, __p1_370) __extension__ ({ \ + int8x16_t __ret_370; \ + int8x16_t __s0_370 = __p0_370; \ + int8x16_t __rev0_370; __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_370 = __noswap_splatq_laneq_s8(__rev0_370, __p1_370); \ + __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_370; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f64(__p0_371, __p1_371) __extension__ ({ \ + float64x2_t __ret_371; \ + float64x2_t __s0_371 = __p0_371; \ + __ret_371 = splatq_laneq_f64(__s0_371, __p1_371); \ + __ret_371; \ +}) +#else +#define vdupq_laneq_f64(__p0_372, __p1_372) __extension__ ({ \ + float64x2_t __ret_372; \ + float64x2_t __s0_372 = __p0_372; \ + float64x2_t __rev0_372; __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 1, 0); \ + __ret_372 = __noswap_splatq_laneq_f64(__rev0_372, __p1_372); \ + __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 1, 0); \ + __ret_372; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f32(__p0_373, __p1_373) __extension__ ({ \ + float32x4_t __ret_373; \ + float32x4_t __s0_373 = __p0_373; \ + __ret_373 = splatq_laneq_f32(__s0_373, __p1_373); \ + __ret_373; \ +}) +#else +#define vdupq_laneq_f32(__p0_374, __p1_374) __extension__ ({ \ + float32x4_t __ret_374; \ + float32x4_t __s0_374 = __p0_374; \ + float32x4_t __rev0_374; __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); 
\ + __ret_374 = __noswap_splatq_laneq_f32(__rev0_374, __p1_374); \ + __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \ + __ret_374; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f16(__p0_375, __p1_375) __extension__ ({ \ + float16x8_t __ret_375; \ + float16x8_t __s0_375 = __p0_375; \ + __ret_375 = splatq_laneq_f16(__s0_375, __p1_375); \ + __ret_375; \ +}) +#else +#define vdupq_laneq_f16(__p0_376, __p1_376) __extension__ ({ \ + float16x8_t __ret_376; \ + float16x8_t __s0_376 = __p0_376; \ + float16x8_t __rev0_376; __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_376 = __noswap_splatq_laneq_f16(__rev0_376, __p1_376); \ + __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_376; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s32(__p0_377, __p1_377) __extension__ ({ \ + int32x4_t __ret_377; \ + int32x4_t __s0_377 = __p0_377; \ + __ret_377 = splatq_laneq_s32(__s0_377, __p1_377); \ + __ret_377; \ +}) +#else +#define vdupq_laneq_s32(__p0_378, __p1_378) __extension__ ({ \ + int32x4_t __ret_378; \ + int32x4_t __s0_378 = __p0_378; \ + int32x4_t __rev0_378; __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 3, 2, 1, 0); \ + __ret_378 = __noswap_splatq_laneq_s32(__rev0_378, __p1_378); \ + __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 3, 2, 1, 0); \ + __ret_378; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s64(__p0_379, __p1_379) __extension__ ({ \ + int64x2_t __ret_379; \ + int64x2_t __s0_379 = __p0_379; \ + __ret_379 = splatq_laneq_s64(__s0_379, __p1_379); \ + __ret_379; \ +}) +#else +#define vdupq_laneq_s64(__p0_380, __p1_380) __extension__ ({ \ + int64x2_t __ret_380; \ + int64x2_t __s0_380 = __p0_380; \ + int64x2_t __rev0_380; __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 1, 0); \ + __ret_380 = __noswap_splatq_laneq_s64(__rev0_380, __p1_380); \ + __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 1, 0); \ + __ret_380; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s16(__p0_381, __p1_381) __extension__ ({ \ + int16x8_t __ret_381; \ + int16x8_t __s0_381 = __p0_381; \ + __ret_381 = splatq_laneq_s16(__s0_381, __p1_381); \ + __ret_381; \ +}) +#else +#define vdupq_laneq_s16(__p0_382, __p1_382) __extension__ ({ \ + int16x8_t __ret_382; \ + int16x8_t __s0_382 = __p0_382; \ + int16x8_t __rev0_382; __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_382 = __noswap_splatq_laneq_s16(__rev0_382, __p1_382); \ + __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_382; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u8(__p0_383, __p1_383) __extension__ ({ \ + uint8x8_t __ret_383; \ + uint8x16_t __s0_383 = __p0_383; \ + __ret_383 = splat_laneq_u8(__s0_383, __p1_383); \ + __ret_383; \ +}) +#else +#define vdup_laneq_u8(__p0_384, __p1_384) __extension__ ({ \ + uint8x8_t __ret_384; \ + uint8x16_t __s0_384 = __p0_384; \ + uint8x16_t __rev0_384; __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_384 = __noswap_splat_laneq_u8(__rev0_384, __p1_384); \ + __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_384; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u32(__p0_385, __p1_385) __extension__ ({ \ + uint32x2_t __ret_385; \ + uint32x4_t __s0_385 = __p0_385; \ + __ret_385 = splat_laneq_u32(__s0_385, 
__p1_385); \ + __ret_385; \ +}) +#else +#define vdup_laneq_u32(__p0_386, __p1_386) __extension__ ({ \ + uint32x2_t __ret_386; \ + uint32x4_t __s0_386 = __p0_386; \ + uint32x4_t __rev0_386; __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 3, 2, 1, 0); \ + __ret_386 = __noswap_splat_laneq_u32(__rev0_386, __p1_386); \ + __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \ + __ret_386; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u64(__p0_387, __p1_387) __extension__ ({ \ + uint64x1_t __ret_387; \ + uint64x2_t __s0_387 = __p0_387; \ + __ret_387 = splat_laneq_u64(__s0_387, __p1_387); \ + __ret_387; \ +}) +#else +#define vdup_laneq_u64(__p0_388, __p1_388) __extension__ ({ \ + uint64x1_t __ret_388; \ + uint64x2_t __s0_388 = __p0_388; \ + uint64x2_t __rev0_388; __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 1, 0); \ + __ret_388 = __noswap_splat_laneq_u64(__rev0_388, __p1_388); \ + __ret_388; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u16(__p0_389, __p1_389) __extension__ ({ \ + uint16x4_t __ret_389; \ + uint16x8_t __s0_389 = __p0_389; \ + __ret_389 = splat_laneq_u16(__s0_389, __p1_389); \ + __ret_389; \ +}) +#else +#define vdup_laneq_u16(__p0_390, __p1_390) __extension__ ({ \ + uint16x4_t __ret_390; \ + uint16x8_t __s0_390 = __p0_390; \ + uint16x8_t __rev0_390; __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_390 = __noswap_splat_laneq_u16(__rev0_390, __p1_390); \ + __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \ + __ret_390; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s8(__p0_391, __p1_391) __extension__ ({ \ + int8x8_t __ret_391; \ + int8x16_t __s0_391 = __p0_391; \ + __ret_391 = splat_laneq_s8(__s0_391, __p1_391); \ + __ret_391; \ +}) +#else +#define vdup_laneq_s8(__p0_392, __p1_392) __extension__ ({ \ + int8x8_t __ret_392; \ + int8x16_t __s0_392 = __p0_392; \ + int8x16_t __rev0_392; __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_392 = __noswap_splat_laneq_s8(__rev0_392, __p1_392); \ + __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_392; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f64(__p0_393, __p1_393) __extension__ ({ \ + float64x1_t __ret_393; \ + float64x2_t __s0_393 = __p0_393; \ + __ret_393 = splat_laneq_f64(__s0_393, __p1_393); \ + __ret_393; \ +}) +#else +#define vdup_laneq_f64(__p0_394, __p1_394) __extension__ ({ \ + float64x1_t __ret_394; \ + float64x2_t __s0_394 = __p0_394; \ + float64x2_t __rev0_394; __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 1, 0); \ + __ret_394 = __noswap_splat_laneq_f64(__rev0_394, __p1_394); \ + __ret_394; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f32(__p0_395, __p1_395) __extension__ ({ \ + float32x2_t __ret_395; \ + float32x4_t __s0_395 = __p0_395; \ + __ret_395 = splat_laneq_f32(__s0_395, __p1_395); \ + __ret_395; \ +}) +#else +#define vdup_laneq_f32(__p0_396, __p1_396) __extension__ ({ \ + float32x2_t __ret_396; \ + float32x4_t __s0_396 = __p0_396; \ + float32x4_t __rev0_396; __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 3, 2, 1, 0); \ + __ret_396 = __noswap_splat_laneq_f32(__rev0_396, __p1_396); \ + __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 1, 0); \ + __ret_396; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f16(__p0_397, __p1_397) __extension__ ({ \ + float16x4_t __ret_397; \ + float16x8_t 
__s0_397 = __p0_397; \ + __ret_397 = splat_laneq_f16(__s0_397, __p1_397); \ + __ret_397; \ +}) +#else +#define vdup_laneq_f16(__p0_398, __p1_398) __extension__ ({ \ + float16x4_t __ret_398; \ + float16x8_t __s0_398 = __p0_398; \ + float16x8_t __rev0_398; __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_398 = __noswap_splat_laneq_f16(__rev0_398, __p1_398); \ + __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \ + __ret_398; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s32(__p0_399, __p1_399) __extension__ ({ \ + int32x2_t __ret_399; \ + int32x4_t __s0_399 = __p0_399; \ + __ret_399 = splat_laneq_s32(__s0_399, __p1_399); \ + __ret_399; \ +}) +#else +#define vdup_laneq_s32(__p0_400, __p1_400) __extension__ ({ \ + int32x2_t __ret_400; \ + int32x4_t __s0_400 = __p0_400; \ + int32x4_t __rev0_400; __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \ + __ret_400 = __noswap_splat_laneq_s32(__rev0_400, __p1_400); \ + __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 1, 0); \ + __ret_400; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s64(__p0_401, __p1_401) __extension__ ({ \ + int64x1_t __ret_401; \ + int64x2_t __s0_401 = __p0_401; \ + __ret_401 = splat_laneq_s64(__s0_401, __p1_401); \ + __ret_401; \ +}) +#else +#define vdup_laneq_s64(__p0_402, __p1_402) __extension__ ({ \ + int64x1_t __ret_402; \ + int64x2_t __s0_402 = __p0_402; \ + int64x2_t __rev0_402; __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \ + __ret_402 = __noswap_splat_laneq_s64(__rev0_402, __p1_402); \ + __ret_402; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s16(__p0_403, __p1_403) __extension__ ({ \ + int16x4_t __ret_403; \ + int16x8_t __s0_403 = __p0_403; \ + __ret_403 = splat_laneq_s16(__s0_403, __p1_403); \ + __ret_403; \ +}) +#else +#define vdup_laneq_s16(__p0_404, __p1_404) __extension__ ({ \ + int16x4_t __ret_404; \ + int16x8_t __s0_404 = __p0_404; \ + int16x8_t __rev0_404; __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_404 = __noswap_splat_laneq_s16(__rev0_404, __p1_404); \ + __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 3, 2, 1, 0); \ + __ret_404; \ +}) +#endif + +__ai poly64x1_t vdup_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vdup_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + 
poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 
= __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x2_t) 
__builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#else +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + 
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2}); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, __p2); + return __ret; +} +#define vfmsd_lane_f64(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \ + float64_t __ret_405; \ + float64_t __s0_405 = __p0_405; \ + float64_t __s1_405 = __p1_405; \ + float64x1_t __s2_405 = __p2_405; \ + __ret_405 = vfmad_lane_f64(__s0_405, -__s1_405, __s2_405, __p3_405); \ + __ret_405; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfmss_lane_f32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \ + float32_t __ret_406; \ + float32_t __s0_406 = __p0_406; \ + float32_t __s1_406 = __p1_406; \ + float32x2_t __s2_406 = __p2_406; \ + __ret_406 = vfmas_lane_f32(__s0_406, -__s1_406, __s2_406, __p3_406); \ + __ret_406; \ +}) +#else +#define vfmss_lane_f32(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \ + float32_t __ret_407; \ + float32_t __s0_407 = __p0_407; \ + float32_t __s1_407 = __p1_407; \ + float32x2_t __s2_407 = __p2_407; \ + float32x2_t __rev2_407; __rev2_407 = __builtin_shufflevector(__s2_407, __s2_407, 1, 0); \ + __ret_407 = __noswap_vfmas_lane_f32(__s0_407, -__s1_407, __rev2_407, __p3_407); \ + __ret_407; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \ 
+ float64x2_t __ret_408; \ + float64x2_t __s0_408 = __p0_408; \ + float64x2_t __s1_408 = __p1_408; \ + float64x1_t __s2_408 = __p2_408; \ + __ret_408 = vfmaq_lane_f64(__s0_408, -__s1_408, __s2_408, __p3_408); \ + __ret_408; \ +}) +#else +#define vfmsq_lane_f64(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \ + float64x2_t __ret_409; \ + float64x2_t __s0_409 = __p0_409; \ + float64x2_t __s1_409 = __p1_409; \ + float64x1_t __s2_409 = __p2_409; \ + float64x2_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, 1, 0); \ + float64x2_t __rev1_409; __rev1_409 = __builtin_shufflevector(__s1_409, __s1_409, 1, 0); \ + __ret_409 = __noswap_vfmaq_lane_f64(__rev0_409, -__rev1_409, __s2_409, __p3_409); \ + __ret_409 = __builtin_shufflevector(__ret_409, __ret_409, 1, 0); \ + __ret_409; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f32(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \ + float32x4_t __ret_410; \ + float32x4_t __s0_410 = __p0_410; \ + float32x4_t __s1_410 = __p1_410; \ + float32x2_t __s2_410 = __p2_410; \ + __ret_410 = vfmaq_lane_f32(__s0_410, -__s1_410, __s2_410, __p3_410); \ + __ret_410; \ +}) +#else +#define vfmsq_lane_f32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \ + float32x4_t __ret_411; \ + float32x4_t __s0_411 = __p0_411; \ + float32x4_t __s1_411 = __p1_411; \ + float32x2_t __s2_411 = __p2_411; \ + float32x4_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, 3, 2, 1, 0); \ + float32x4_t __rev1_411; __rev1_411 = __builtin_shufflevector(__s1_411, __s1_411, 3, 2, 1, 0); \ + float32x2_t __rev2_411; __rev2_411 = __builtin_shufflevector(__s2_411, __s2_411, 1, 0); \ + __ret_411 = __noswap_vfmaq_lane_f32(__rev0_411, -__rev1_411, __rev2_411, __p3_411); \ + __ret_411 = __builtin_shufflevector(__ret_411, __ret_411, 3, 2, 1, 0); \ + __ret_411; \ +}) +#endif + +#define vfms_lane_f64(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \ + float64x1_t __ret_412; \ + float64x1_t __s0_412 = __p0_412; \ + float64x1_t __s1_412 = __p1_412; \ + float64x1_t __s2_412 = __p2_412; \ + __ret_412 = vfma_lane_f64(__s0_412, -__s1_412, __s2_412, __p3_412); \ + __ret_412; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \ + float32x2_t __ret_413; \ + float32x2_t __s0_413 = __p0_413; \ + float32x2_t __s1_413 = __p1_413; \ + float32x2_t __s2_413 = __p2_413; \ + __ret_413 = vfma_lane_f32(__s0_413, -__s1_413, __s2_413, __p3_413); \ + __ret_413; \ +}) +#else +#define vfms_lane_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \ + float32x2_t __ret_414; \ + float32x2_t __s0_414 = __p0_414; \ + float32x2_t __s1_414 = __p1_414; \ + float32x2_t __s2_414 = __p2_414; \ + float32x2_t __rev0_414; __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 1, 0); \ + float32x2_t __rev1_414; __rev1_414 = __builtin_shufflevector(__s1_414, __s1_414, 1, 0); \ + float32x2_t __rev2_414; __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \ + __ret_414 = __noswap_vfma_lane_f32(__rev0_414, -__rev1_414, __rev2_414, __p3_414); \ + __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 1, 0); \ + __ret_414; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsd_laneq_f64(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \ + float64_t __ret_415; \ + float64_t __s0_415 = __p0_415; \ + float64_t __s1_415 = __p1_415; \ + float64x2_t __s2_415 = __p2_415; \ + __ret_415 = vfmad_laneq_f64(__s0_415, -__s1_415, __s2_415, __p3_415); \ + __ret_415; \ +}) +#else 
+#define vfmsd_laneq_f64(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \ + float64_t __ret_416; \ + float64_t __s0_416 = __p0_416; \ + float64_t __s1_416 = __p1_416; \ + float64x2_t __s2_416 = __p2_416; \ + float64x2_t __rev2_416; __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 1, 0); \ + __ret_416 = __noswap_vfmad_laneq_f64(__s0_416, -__s1_416, __rev2_416, __p3_416); \ + __ret_416; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmss_laneq_f32(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \ + float32_t __ret_417; \ + float32_t __s0_417 = __p0_417; \ + float32_t __s1_417 = __p1_417; \ + float32x4_t __s2_417 = __p2_417; \ + __ret_417 = vfmas_laneq_f32(__s0_417, -__s1_417, __s2_417, __p3_417); \ + __ret_417; \ +}) +#else +#define vfmss_laneq_f32(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \ + float32_t __ret_418; \ + float32_t __s0_418 = __p0_418; \ + float32_t __s1_418 = __p1_418; \ + float32x4_t __s2_418 = __p2_418; \ + float32x4_t __rev2_418; __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 3, 2, 1, 0); \ + __ret_418 = __noswap_vfmas_laneq_f32(__s0_418, -__s1_418, __rev2_418, __p3_418); \ + __ret_418; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f64(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \ + float64x2_t __ret_419; \ + float64x2_t __s0_419 = __p0_419; \ + float64x2_t __s1_419 = __p1_419; \ + float64x2_t __s2_419 = __p2_419; \ + __ret_419 = vfmaq_laneq_f64(__s0_419, -__s1_419, __s2_419, __p3_419); \ + __ret_419; \ +}) +#else +#define vfmsq_laneq_f64(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \ + float64x2_t __ret_420; \ + float64x2_t __s0_420 = __p0_420; \ + float64x2_t __s1_420 = __p1_420; \ + float64x2_t __s2_420 = __p2_420; \ + float64x2_t __rev0_420; __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \ + float64x2_t __rev1_420; __rev1_420 = __builtin_shufflevector(__s1_420, __s1_420, 1, 0); \ + float64x2_t __rev2_420; __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 1, 0); \ + __ret_420 = __noswap_vfmaq_laneq_f64(__rev0_420, -__rev1_420, __rev2_420, __p3_420); \ + __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \ + __ret_420; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \ + float32x4_t __ret_421; \ + float32x4_t __s0_421 = __p0_421; \ + float32x4_t __s1_421 = __p1_421; \ + float32x4_t __s2_421 = __p2_421; \ + __ret_421 = vfmaq_laneq_f32(__s0_421, -__s1_421, __s2_421, __p3_421); \ + __ret_421; \ +}) +#else +#define vfmsq_laneq_f32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \ + float32x4_t __ret_422; \ + float32x4_t __s0_422 = __p0_422; \ + float32x4_t __s1_422 = __p1_422; \ + float32x4_t __s2_422 = __p2_422; \ + float32x4_t __rev0_422; __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 3, 2, 1, 0); \ + float32x4_t __rev1_422; __rev1_422 = __builtin_shufflevector(__s1_422, __s1_422, 3, 2, 1, 0); \ + float32x4_t __rev2_422; __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \ + __ret_422 = __noswap_vfmaq_laneq_f32(__rev0_422, -__rev1_422, __rev2_422, __p3_422); \ + __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 3, 2, 1, 0); \ + __ret_422; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \ + float64x1_t __ret_423; \ + float64x1_t __s0_423 = __p0_423; \ + float64x1_t __s1_423 = __p1_423; \ + float64x2_t __s2_423 = __p2_423; \ + __ret_423 = 
vfma_laneq_f64(__s0_423, -__s1_423, __s2_423, __p3_423); \ + __ret_423; \ +}) +#else +#define vfms_laneq_f64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \ + float64x1_t __ret_424; \ + float64x1_t __s0_424 = __p0_424; \ + float64x1_t __s1_424 = __p1_424; \ + float64x2_t __s2_424 = __p2_424; \ + float64x2_t __rev2_424; __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \ + __ret_424 = __noswap_vfma_laneq_f64(__s0_424, -__s1_424, __rev2_424, __p3_424); \ + __ret_424; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f32(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \ + float32x2_t __ret_425; \ + float32x2_t __s0_425 = __p0_425; \ + float32x2_t __s1_425 = __p1_425; \ + float32x4_t __s2_425 = __p2_425; \ + __ret_425 = vfma_laneq_f32(__s0_425, -__s1_425, __s2_425, __p3_425); \ + __ret_425; \ +}) +#else +#define vfms_laneq_f32(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ + float32x2_t __ret_426; \ + float32x2_t __s0_426 = __p0_426; \ + float32x2_t __s1_426 = __p1_426; \ + float32x4_t __s2_426 = __p2_426; \ + float32x2_t __rev0_426; __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 1, 0); \ + float32x2_t __rev1_426; __rev1_426 = __builtin_shufflevector(__s1_426, __s1_426, 1, 0); \ + float32x4_t __rev2_426; __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 3, 2, 1, 0); \ + __ret_426 = __noswap_vfma_laneq_f32(__rev0_426, -__rev1_426, __rev2_426, __p3_426); \ + __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 1, 0); \ + __ret_426; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2}); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfma_f32(__rev0, -__rev1, 
(float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vget_high_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai float64x1_t vget_high_f64(float64x2_t __p0) { + float64x1_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#define vget_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#define vget_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + 
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#define vld1_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ + __ret; \ +}) +#define vld1_dup_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_dup_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ + __ret; \ +}) +#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = 
(float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#define vld1_p64_x2(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x2(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld1_p64_x3(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x3(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld1_p64_x4(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] 
= __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x4(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_dup_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_dup_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __ret; \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __ret; \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ + __ret; \ +}) +#else +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __ret; \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __ret; \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ + __ret; \ +}) +#else +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __ret; \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __ret; \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ + __ret; \ +}) +#else +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __ret; \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __ret; \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ + __ret; \ +}) +#else +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __ret; \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __ret; \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ + __ret; \ +}) +#else +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __ret; \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __ret; \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ + __ret; \ +}) +#else +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __ret; \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __ret; \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ + __ret; \ +}) +#else +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __ret; \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __ret; \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ + __ret; \ +}) +#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __ret; \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ + __ret; \ +}) +#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __ret; \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ + __ret; \ +}) +#define vld3_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define 
vld3_dup_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __ret; \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __ret; \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ + __ret; \ +}) +#else +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __ret; \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __ret; \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ + __ret; \ +}) +#else +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __ret; \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 
0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __ret; \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ + __ret; \ +}) +#else +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __ret; \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __ret; \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ + __ret; \ +}) +#else +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __ret; \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __ret; \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ + __ret; \ +}) +#else +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __ret; \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __ret; \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ + __ret; \ +}) +#else +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __ret; \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __ret; \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ + __ret; \ +}) +#else +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __ret; \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __ret; \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ + __ret; \ +}) +#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __ret; \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ + __ret; \ +}) +#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __ret; \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ + __ret; \ 
+}) +#define vld4_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_dup_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f64(__p0) 
__extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __ret; \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __ret; \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ + __ret; \ +}) +#else +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __ret; \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __ret; \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ + __ret; \ +}) +#else +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __ret; \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 
__p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __ret; \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ + __ret; \ +}) +#else +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __ret; \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __ret; \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ + __ret; \ +}) +#else +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __ret; \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __ret; \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 
32); \ + __ret; \ +}) +#else +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __ret; \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __ret; \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ + __ret; \ +}) +#else +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __ret; \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __ret; \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ + __ret; \ +}) +#else +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __ret; \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ + \ 
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __ret; \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ + __ret; \ +}) +#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __ret; \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ + __ret; \ +}) +#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __ret; \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ + __ret; \ +}) +#define vldrq_p128(__p0) __extension__ ({ \ + poly128_t __ret; \ + __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0); + return __ret; +} +#else +__ai uint8_t vmaxvq_u8(uint8x16_t __p0) 
{ + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0); + return __ret; +} +#else +__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0); + return __ret; +} +#else +__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0); + return __ret; +} +#else +__ai int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0); + return __ret; +} +#else +__ai int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0); + return __ret; +} +#else +__ai int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0); + return __ret; +} +#else +__ai uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) 
__builtin_neon_vmaxv_u32(__p0); + return __ret; +} +#else +__ai uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0); + return __ret; +} +#else +__ai uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0); + return __ret; +} +#else +__ai int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0); + return __ret; +} +#else +__ai int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0); + return __ret; +} +#else +__ai int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) 
__builtin_neon_vminnmvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0); + return __ret; +} +#else +__ai float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0); + return __ret; +} +#else +__ai uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0); + return __ret; +} +#else +__ai uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0); + return __ret; +} +#else +__ai uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminvq_s8(__p0); + return __ret; +} +#else +__ai int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminvq_s32(__p0); + return __ret; +} +#else +__ai int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16_t vminvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminvq_s16(__p0); + return __ret; +} +#else +__ai int16_t vminvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminv_u8(__p0); + return __ret; +} +#else +__ai uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminv_u32(__p0); + return __ret; +} +#else +__ai uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminv_u16(__p0); + return __ret; +} +#else +__ai uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminv_s8(__p0); + return __ret; +} +#else +__ai int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vminv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminv_f32(__p0); + return __ret; +} +#else +__ai float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vminv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminv_s32(__p0); + return __ret; +} +#else +__ai int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vminv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminv_s16(__p0); + return __ret; +} +#else +__ai int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vminv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t 
__rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ + uint32x4_t __ret_427; \ + uint32x4_t __s0_427 = __p0_427; \ + uint32x4_t __s1_427 = __p1_427; \ + uint32x4_t __s2_427 = __p2_427; \ + __ret_427 = __s0_427 + __s1_427 * splatq_laneq_u32(__s2_427, __p3_427); \ + __ret_427; \ +}) +#else +#define vmlaq_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ + uint32x4_t __ret_428; \ + uint32x4_t __s0_428 = __p0_428; \ + uint32x4_t __s1_428 = __p1_428; \ + uint32x4_t __s2_428 = __p2_428; \ + uint32x4_t __rev0_428; __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 3, 2, 1, 0); \ + uint32x4_t __rev1_428; __rev1_428 = __builtin_shufflevector(__s1_428, __s1_428, 3, 2, 1, 0); \ + uint32x4_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \ + __ret_428 = __rev0_428 + __rev1_428 * __noswap_splatq_laneq_u32(__rev2_428, __p3_428); \ + __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 3, 2, 1, 0); \ + __ret_428; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u16(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ + uint16x8_t __ret_429; \ + uint16x8_t __s0_429 = __p0_429; \ + uint16x8_t __s1_429 = __p1_429; \ + uint16x8_t __s2_429 = __p2_429; \ + __ret_429 = __s0_429 + __s1_429 * splatq_laneq_u16(__s2_429, __p3_429); \ + __ret_429; \ +}) +#else +#define vmlaq_laneq_u16(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ + uint16x8_t __ret_430; \ + uint16x8_t __s0_430 = __p0_430; \ + uint16x8_t __s1_430 = __p1_430; \ + uint16x8_t __s2_430 = __p2_430; \ + uint16x8_t __rev0_430; __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_430; __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_430; __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_430 = __rev0_430 + __rev1_430 * __noswap_splatq_laneq_u16(__rev2_430, __p3_430); \ + __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_430; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_f32(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ + float32x4_t __ret_431; \ + float32x4_t __s0_431 = __p0_431; \ + float32x4_t __s1_431 = __p1_431; \ + float32x4_t __s2_431 = __p2_431; \ + __ret_431 = __s0_431 + __s1_431 * splatq_laneq_f32(__s2_431, __p3_431); \ + __ret_431; \ +}) +#else +#define vmlaq_laneq_f32(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ + float32x4_t __ret_432; \ + float32x4_t __s0_432 = __p0_432; \ + float32x4_t __s1_432 = __p1_432; \ + float32x4_t __s2_432 = __p2_432; \ + float32x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ + float32x4_t __rev1_432; __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 3, 2, 1, 0); \ + float32x4_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 3, 2, 1, 0); \ + __ret_432 = __rev0_432 + __rev1_432 * __noswap_splatq_laneq_f32(__rev2_432, __p3_432); \ + __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ + __ret_432; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vmlaq_laneq_s32(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ + int32x4_t __ret_433; \ + int32x4_t __s0_433 = __p0_433; \ + int32x4_t __s1_433 = __p1_433; \ + int32x4_t __s2_433 = __p2_433; \ + __ret_433 = __s0_433 + __s1_433 * splatq_laneq_s32(__s2_433, __p3_433); \ + __ret_433; \ +}) +#else +#define vmlaq_laneq_s32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ + int32x4_t __ret_434; \ + int32x4_t __s0_434 = __p0_434; \ + int32x4_t __s1_434 = __p1_434; \ + int32x4_t __s2_434 = __p2_434; \ + int32x4_t __rev0_434; __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 3, 2, 1, 0); \ + int32x4_t __rev1_434; __rev1_434 = __builtin_shufflevector(__s1_434, __s1_434, 3, 2, 1, 0); \ + int32x4_t __rev2_434; __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 3, 2, 1, 0); \ + __ret_434 = __rev0_434 + __rev1_434 * __noswap_splatq_laneq_s32(__rev2_434, __p3_434); \ + __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 3, 2, 1, 0); \ + __ret_434; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_s16(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ + int16x8_t __ret_435; \ + int16x8_t __s0_435 = __p0_435; \ + int16x8_t __s1_435 = __p1_435; \ + int16x8_t __s2_435 = __p2_435; \ + __ret_435 = __s0_435 + __s1_435 * splatq_laneq_s16(__s2_435, __p3_435); \ + __ret_435; \ +}) +#else +#define vmlaq_laneq_s16(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ + int16x8_t __ret_436; \ + int16x8_t __s0_436 = __p0_436; \ + int16x8_t __s1_436 = __p1_436; \ + int16x8_t __s2_436 = __p2_436; \ + int16x8_t __rev0_436; __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_436; __rev1_436 = __builtin_shufflevector(__s1_436, __s1_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_436; __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_436 = __rev0_436 + __rev1_436 * __noswap_splatq_laneq_s16(__rev2_436, __p3_436); \ + __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_436; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ + uint32x2_t __ret_437; \ + uint32x2_t __s0_437 = __p0_437; \ + uint32x2_t __s1_437 = __p1_437; \ + uint32x4_t __s2_437 = __p2_437; \ + __ret_437 = __s0_437 + __s1_437 * splat_laneq_u32(__s2_437, __p3_437); \ + __ret_437; \ +}) +#else +#define vmla_laneq_u32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ + uint32x2_t __ret_438; \ + uint32x2_t __s0_438 = __p0_438; \ + uint32x2_t __s1_438 = __p1_438; \ + uint32x4_t __s2_438 = __p2_438; \ + uint32x2_t __rev0_438; __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \ + uint32x2_t __rev1_438; __rev1_438 = __builtin_shufflevector(__s1_438, __s1_438, 1, 0); \ + uint32x4_t __rev2_438; __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \ + __ret_438 = __rev0_438 + __rev1_438 * __noswap_splat_laneq_u32(__rev2_438, __p3_438); \ + __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \ + __ret_438; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u16(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ + uint16x4_t __ret_439; \ + uint16x4_t __s0_439 = __p0_439; \ + uint16x4_t __s1_439 = __p1_439; \ + uint16x8_t __s2_439 = __p2_439; \ + __ret_439 = __s0_439 + __s1_439 * splat_laneq_u16(__s2_439, __p3_439); \ + __ret_439; \ +}) +#else +#define vmla_laneq_u16(__p0_440, __p1_440, __p2_440, __p3_440) 
__extension__ ({ \ + uint16x4_t __ret_440; \ + uint16x4_t __s0_440 = __p0_440; \ + uint16x4_t __s1_440 = __p1_440; \ + uint16x8_t __s2_440 = __p2_440; \ + uint16x4_t __rev0_440; __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 3, 2, 1, 0); \ + uint16x4_t __rev1_440; __rev1_440 = __builtin_shufflevector(__s1_440, __s1_440, 3, 2, 1, 0); \ + uint16x8_t __rev2_440; __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_440 = __rev0_440 + __rev1_440 * __noswap_splat_laneq_u16(__rev2_440, __p3_440); \ + __ret_440 = __builtin_shufflevector(__ret_440, __ret_440, 3, 2, 1, 0); \ + __ret_440; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_f32(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ + float32x2_t __ret_441; \ + float32x2_t __s0_441 = __p0_441; \ + float32x2_t __s1_441 = __p1_441; \ + float32x4_t __s2_441 = __p2_441; \ + __ret_441 = __s0_441 + __s1_441 * splat_laneq_f32(__s2_441, __p3_441); \ + __ret_441; \ +}) +#else +#define vmla_laneq_f32(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ + float32x2_t __ret_442; \ + float32x2_t __s0_442 = __p0_442; \ + float32x2_t __s1_442 = __p1_442; \ + float32x4_t __s2_442 = __p2_442; \ + float32x2_t __rev0_442; __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 1, 0); \ + float32x2_t __rev1_442; __rev1_442 = __builtin_shufflevector(__s1_442, __s1_442, 1, 0); \ + float32x4_t __rev2_442; __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 3, 2, 1, 0); \ + __ret_442 = __rev0_442 + __rev1_442 * __noswap_splat_laneq_f32(__rev2_442, __p3_442); \ + __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 1, 0); \ + __ret_442; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \ + int32x2_t __ret_443; \ + int32x2_t __s0_443 = __p0_443; \ + int32x2_t __s1_443 = __p1_443; \ + int32x4_t __s2_443 = __p2_443; \ + __ret_443 = __s0_443 + __s1_443 * splat_laneq_s32(__s2_443, __p3_443); \ + __ret_443; \ +}) +#else +#define vmla_laneq_s32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \ + int32x2_t __ret_444; \ + int32x2_t __s0_444 = __p0_444; \ + int32x2_t __s1_444 = __p1_444; \ + int32x4_t __s2_444 = __p2_444; \ + int32x2_t __rev0_444; __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 1, 0); \ + int32x2_t __rev1_444; __rev1_444 = __builtin_shufflevector(__s1_444, __s1_444, 1, 0); \ + int32x4_t __rev2_444; __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, 3, 2, 1, 0); \ + __ret_444 = __rev0_444 + __rev1_444 * __noswap_splat_laneq_s32(__rev2_444, __p3_444); \ + __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 1, 0); \ + __ret_444; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s16(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \ + int16x4_t __ret_445; \ + int16x4_t __s0_445 = __p0_445; \ + int16x4_t __s1_445 = __p1_445; \ + int16x8_t __s2_445 = __p2_445; \ + __ret_445 = __s0_445 + __s1_445 * splat_laneq_s16(__s2_445, __p3_445); \ + __ret_445; \ +}) +#else +#define vmla_laneq_s16(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \ + int16x4_t __ret_446; \ + int16x4_t __s0_446 = __p0_446; \ + int16x4_t __s1_446 = __p1_446; \ + int16x8_t __s2_446 = __p2_446; \ + int16x4_t __rev0_446; __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 3, 2, 1, 0); \ + int16x4_t __rev1_446; __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, 3, 2, 1, 0); \ + int16x8_t __rev2_446; __rev2_446 = __builtin_shufflevector(__s2_446, __s2_446, 7, 6, 5, 4, 3, 2, 1, 
0); \ + __ret_446 = __rev0_446 + __rev1_446 * __noswap_splat_laneq_s16(__rev2_446, __p3_446); \ + __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 3, 2, 1, 0); \ + __ret_446; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \ + uint64x2_t __ret_447; \ + uint64x2_t __s0_447 = __p0_447; \ + uint32x4_t __s1_447 = __p1_447; \ + uint32x2_t __s2_447 = __p2_447; \ + __ret_447 = __s0_447 + vmull_u32(vget_high_u32(__s1_447), splat_lane_u32(__s2_447, __p3_447)); \ + __ret_447; \ +}) +#else +#define vmlal_high_lane_u32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \ + uint64x2_t __ret_448; \ + uint64x2_t __s0_448 = __p0_448; \ + uint32x4_t __s1_448 = __p1_448; \ + uint32x2_t __s2_448 = __p2_448; \ + uint64x2_t __rev0_448; __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 1, 0); \ + uint32x4_t __rev1_448; __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, 3, 2, 1, 0); \ + uint32x2_t __rev2_448; __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, 1, 0); \ + __ret_448 = __rev0_448 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_448), __noswap_splat_lane_u32(__rev2_448, __p3_448)); \ + __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 1, 0); \ + __ret_448; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u16(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \ + uint32x4_t __ret_449; \ + uint32x4_t __s0_449 = __p0_449; \ + uint16x8_t __s1_449 = __p1_449; \ + uint16x4_t __s2_449 = __p2_449; \ + __ret_449 = __s0_449 + vmull_u16(vget_high_u16(__s1_449), splat_lane_u16(__s2_449, __p3_449)); \ + __ret_449; \ +}) +#else +#define vmlal_high_lane_u16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \ + uint32x4_t __ret_450; \ + uint32x4_t __s0_450 = __p0_450; \ + uint16x8_t __s1_450 = __p1_450; \ + uint16x4_t __s2_450 = __p2_450; \ + uint32x4_t __rev0_450; __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 3, 2, 1, 0); \ + uint16x8_t __rev1_450; __rev1_450 = __builtin_shufflevector(__s1_450, __s1_450, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_450; __rev2_450 = __builtin_shufflevector(__s2_450, __s2_450, 3, 2, 1, 0); \ + __ret_450 = __rev0_450 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_450), __noswap_splat_lane_u16(__rev2_450, __p3_450)); \ + __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 3, 2, 1, 0); \ + __ret_450; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_s32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \ + int64x2_t __ret_451; \ + int64x2_t __s0_451 = __p0_451; \ + int32x4_t __s1_451 = __p1_451; \ + int32x2_t __s2_451 = __p2_451; \ + __ret_451 = __s0_451 + vmull_s32(vget_high_s32(__s1_451), splat_lane_s32(__s2_451, __p3_451)); \ + __ret_451; \ +}) +#else +#define vmlal_high_lane_s32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \ + int64x2_t __ret_452; \ + int64x2_t __s0_452 = __p0_452; \ + int32x4_t __s1_452 = __p1_452; \ + int32x2_t __s2_452 = __p2_452; \ + int64x2_t __rev0_452; __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \ + int32x4_t __rev1_452; __rev1_452 = __builtin_shufflevector(__s1_452, __s1_452, 3, 2, 1, 0); \ + int32x2_t __rev2_452; __rev2_452 = __builtin_shufflevector(__s2_452, __s2_452, 1, 0); \ + __ret_452 = __rev0_452 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_452), __noswap_splat_lane_s32(__rev2_452, __p3_452)); \ + __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 1, 0); \ + __ret_452; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vmlal_high_lane_s16(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \ + int32x4_t __ret_453; \ + int32x4_t __s0_453 = __p0_453; \ + int16x8_t __s1_453 = __p1_453; \ + int16x4_t __s2_453 = __p2_453; \ + __ret_453 = __s0_453 + vmull_s16(vget_high_s16(__s1_453), splat_lane_s16(__s2_453, __p3_453)); \ + __ret_453; \ +}) +#else +#define vmlal_high_lane_s16(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \ + int32x4_t __ret_454; \ + int32x4_t __s0_454 = __p0_454; \ + int16x8_t __s1_454 = __p1_454; \ + int16x4_t __s2_454 = __p2_454; \ + int32x4_t __rev0_454; __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 3, 2, 1, 0); \ + int16x8_t __rev1_454; __rev1_454 = __builtin_shufflevector(__s1_454, __s1_454, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_454; __rev2_454 = __builtin_shufflevector(__s2_454, __s2_454, 3, 2, 1, 0); \ + __ret_454 = __rev0_454 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_454), __noswap_splat_lane_s16(__rev2_454, __p3_454)); \ + __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \ + __ret_454; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \ + uint64x2_t __ret_455; \ + uint64x2_t __s0_455 = __p0_455; \ + uint32x4_t __s1_455 = __p1_455; \ + uint32x4_t __s2_455 = __p2_455; \ + __ret_455 = __s0_455 + vmull_u32(vget_high_u32(__s1_455), splat_laneq_u32(__s2_455, __p3_455)); \ + __ret_455; \ +}) +#else +#define vmlal_high_laneq_u32(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \ + uint64x2_t __ret_456; \ + uint64x2_t __s0_456 = __p0_456; \ + uint32x4_t __s1_456 = __p1_456; \ + uint32x4_t __s2_456 = __p2_456; \ + uint64x2_t __rev0_456; __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \ + uint32x4_t __rev1_456; __rev1_456 = __builtin_shufflevector(__s1_456, __s1_456, 3, 2, 1, 0); \ + uint32x4_t __rev2_456; __rev2_456 = __builtin_shufflevector(__s2_456, __s2_456, 3, 2, 1, 0); \ + __ret_456 = __rev0_456 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_456), __noswap_splat_laneq_u32(__rev2_456, __p3_456)); \ + __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 1, 0); \ + __ret_456; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \ + uint32x4_t __ret_457; \ + uint32x4_t __s0_457 = __p0_457; \ + uint16x8_t __s1_457 = __p1_457; \ + uint16x8_t __s2_457 = __p2_457; \ + __ret_457 = __s0_457 + vmull_u16(vget_high_u16(__s1_457), splat_laneq_u16(__s2_457, __p3_457)); \ + __ret_457; \ +}) +#else +#define vmlal_high_laneq_u16(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \ + uint32x4_t __ret_458; \ + uint32x4_t __s0_458 = __p0_458; \ + uint16x8_t __s1_458 = __p1_458; \ + uint16x8_t __s2_458 = __p2_458; \ + uint32x4_t __rev0_458; __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 3, 2, 1, 0); \ + uint16x8_t __rev1_458; __rev1_458 = __builtin_shufflevector(__s1_458, __s1_458, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_458; __rev2_458 = __builtin_shufflevector(__s2_458, __s2_458, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_458 = __rev0_458 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_458), __noswap_splat_laneq_u16(__rev2_458, __p3_458)); \ + __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \ + __ret_458; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \ + int64x2_t __ret_459; \ + int64x2_t __s0_459 = __p0_459; \ 
+ int32x4_t __s1_459 = __p1_459; \ + int32x4_t __s2_459 = __p2_459; \ + __ret_459 = __s0_459 + vmull_s32(vget_high_s32(__s1_459), splat_laneq_s32(__s2_459, __p3_459)); \ + __ret_459; \ +}) +#else +#define vmlal_high_laneq_s32(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \ + int64x2_t __ret_460; \ + int64x2_t __s0_460 = __p0_460; \ + int32x4_t __s1_460 = __p1_460; \ + int32x4_t __s2_460 = __p2_460; \ + int64x2_t __rev0_460; __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \ + int32x4_t __rev1_460; __rev1_460 = __builtin_shufflevector(__s1_460, __s1_460, 3, 2, 1, 0); \ + int32x4_t __rev2_460; __rev2_460 = __builtin_shufflevector(__s2_460, __s2_460, 3, 2, 1, 0); \ + __ret_460 = __rev0_460 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_460), __noswap_splat_laneq_s32(__rev2_460, __p3_460)); \ + __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \ + __ret_460; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \ + int32x4_t __ret_461; \ + int32x4_t __s0_461 = __p0_461; \ + int16x8_t __s1_461 = __p1_461; \ + int16x8_t __s2_461 = __p2_461; \ + __ret_461 = __s0_461 + vmull_s16(vget_high_s16(__s1_461), splat_laneq_s16(__s2_461, __p3_461)); \ + __ret_461; \ +}) +#else +#define vmlal_high_laneq_s16(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \ + int32x4_t __ret_462; \ + int32x4_t __s0_462 = __p0_462; \ + int16x8_t __s1_462 = __p1_462; \ + int16x8_t __s2_462 = __p2_462; \ + int32x4_t __rev0_462; __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \ + int16x8_t __rev1_462; __rev1_462 = __builtin_shufflevector(__s1_462, __s1_462, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_462; __rev2_462 = __builtin_shufflevector(__s2_462, __s2_462, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_462 = __rev0_462 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_462), __noswap_splat_laneq_s16(__rev2_462, __p3_462)); \ + __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \ + __ret_462; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \ + uint64x2_t __ret_463; \ + uint64x2_t __s0_463 = __p0_463; \ + uint32x2_t __s1_463 = __p1_463; \ + uint32x4_t __s2_463 = __p2_463; \ + __ret_463 = __s0_463 + vmull_u32(__s1_463, splat_laneq_u32(__s2_463, __p3_463)); \ + __ret_463; \ +}) +#else +#define vmlal_laneq_u32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \ + uint64x2_t __ret_464; \ + uint64x2_t __s0_464 = __p0_464; \ + uint32x2_t __s1_464 = __p1_464; \ + uint32x4_t __s2_464 = __p2_464; \ + uint64x2_t __rev0_464; __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 1, 0); \ + uint32x2_t __rev1_464; __rev1_464 = __builtin_shufflevector(__s1_464, __s1_464, 1, 0); \ + uint32x4_t __rev2_464; __rev2_464 = __builtin_shufflevector(__s2_464, __s2_464, 3, 2, 1, 0); \ + __ret_464 = __rev0_464 + __noswap_vmull_u32(__rev1_464, __noswap_splat_laneq_u32(__rev2_464, __p3_464)); \ + __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 1, 0); \ + __ret_464; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u16(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \ + uint32x4_t __ret_465; \ + uint32x4_t __s0_465 = __p0_465; \ + uint16x4_t __s1_465 = __p1_465; \ + uint16x8_t __s2_465 = __p2_465; \ + __ret_465 = __s0_465 + vmull_u16(__s1_465, splat_laneq_u16(__s2_465, __p3_465)); \ + __ret_465; \ +}) +#else +#define vmlal_laneq_u16(__p0_466, __p1_466, __p2_466, __p3_466) 
__extension__ ({ \ + uint32x4_t __ret_466; \ + uint32x4_t __s0_466 = __p0_466; \ + uint16x4_t __s1_466 = __p1_466; \ + uint16x8_t __s2_466 = __p2_466; \ + uint32x4_t __rev0_466; __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \ + uint16x4_t __rev1_466; __rev1_466 = __builtin_shufflevector(__s1_466, __s1_466, 3, 2, 1, 0); \ + uint16x8_t __rev2_466; __rev2_466 = __builtin_shufflevector(__s2_466, __s2_466, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_466 = __rev0_466 + __noswap_vmull_u16(__rev1_466, __noswap_splat_laneq_u16(__rev2_466, __p3_466)); \ + __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \ + __ret_466; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s32(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \ + int64x2_t __ret_467; \ + int64x2_t __s0_467 = __p0_467; \ + int32x2_t __s1_467 = __p1_467; \ + int32x4_t __s2_467 = __p2_467; \ + __ret_467 = __s0_467 + vmull_s32(__s1_467, splat_laneq_s32(__s2_467, __p3_467)); \ + __ret_467; \ +}) +#else +#define vmlal_laneq_s32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \ + int64x2_t __ret_468; \ + int64x2_t __s0_468 = __p0_468; \ + int32x2_t __s1_468 = __p1_468; \ + int32x4_t __s2_468 = __p2_468; \ + int64x2_t __rev0_468; __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \ + int32x2_t __rev1_468; __rev1_468 = __builtin_shufflevector(__s1_468, __s1_468, 1, 0); \ + int32x4_t __rev2_468; __rev2_468 = __builtin_shufflevector(__s2_468, __s2_468, 3, 2, 1, 0); \ + __ret_468 = __rev0_468 + __noswap_vmull_s32(__rev1_468, __noswap_splat_laneq_s32(__rev2_468, __p3_468)); \ + __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \ + __ret_468; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s16(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \ + int32x4_t __ret_469; \ + int32x4_t __s0_469 = __p0_469; \ + int16x4_t __s1_469 = __p1_469; \ + int16x8_t __s2_469 = __p2_469; \ + __ret_469 = __s0_469 + vmull_s16(__s1_469, splat_laneq_s16(__s2_469, __p3_469)); \ + __ret_469; \ +}) +#else +#define vmlal_laneq_s16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \ + int32x4_t __ret_470; \ + int32x4_t __s0_470 = __p0_470; \ + int16x4_t __s1_470 = __p1_470; \ + int16x8_t __s2_470 = __p2_470; \ + int32x4_t __rev0_470; __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 3, 2, 1, 0); \ + int16x4_t __rev1_470; __rev1_470 = __builtin_shufflevector(__s1_470, __s1_470, 3, 2, 1, 0); \ + int16x8_t __rev2_470; __rev2_470 = __builtin_shufflevector(__s2_470, __s2_470, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_470 = __rev0_470 + __noswap_vmull_s16(__rev1_470, __noswap_splat_laneq_s16(__rev2_470, __p3_470)); \ + __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 3, 2, 1, 0); \ + __ret_470; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 - __p1 
* __p2; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \ + uint32x4_t __ret_471; \ + uint32x4_t __s0_471 = __p0_471; \ + uint32x4_t __s1_471 = __p1_471; \ + uint32x4_t __s2_471 = __p2_471; \ + __ret_471 = __s0_471 - __s1_471 * splatq_laneq_u32(__s2_471, __p3_471); \ + __ret_471; \ +}) +#else +#define vmlsq_laneq_u32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \ + uint32x4_t __ret_472; \ + uint32x4_t __s0_472 = __p0_472; \ + uint32x4_t __s1_472 = __p1_472; \ + uint32x4_t __s2_472 = __p2_472; \ + uint32x4_t __rev0_472; __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 3, 2, 1, 0); \ + uint32x4_t __rev1_472; __rev1_472 = __builtin_shufflevector(__s1_472, __s1_472, 3, 2, 1, 0); \ + uint32x4_t __rev2_472; __rev2_472 = __builtin_shufflevector(__s2_472, __s2_472, 3, 2, 1, 0); \ + __ret_472 = __rev0_472 - __rev1_472 * __noswap_splatq_laneq_u32(__rev2_472, __p3_472); \ + __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 3, 2, 1, 0); \ + __ret_472; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u16(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \ + uint16x8_t __ret_473; \ + uint16x8_t __s0_473 = __p0_473; \ + uint16x8_t __s1_473 = __p1_473; \ + uint16x8_t __s2_473 = __p2_473; \ + __ret_473 = __s0_473 - __s1_473 * splatq_laneq_u16(__s2_473, __p3_473); \ + __ret_473; \ +}) +#else +#define vmlsq_laneq_u16(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \ + uint16x8_t __ret_474; \ + uint16x8_t __s0_474 = __p0_474; \ + uint16x8_t __s1_474 = __p1_474; \ + uint16x8_t __s2_474 = __p2_474; \ + uint16x8_t __rev0_474; __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_474; __rev1_474 = __builtin_shufflevector(__s1_474, __s1_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_474; __rev2_474 = __builtin_shufflevector(__s2_474, __s2_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_474 = __rev0_474 - __rev1_474 * __noswap_splatq_laneq_u16(__rev2_474, __p3_474); \ + __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_474; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_f32(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \ + float32x4_t __ret_475; \ + float32x4_t __s0_475 = __p0_475; \ + float32x4_t __s1_475 = __p1_475; \ + float32x4_t __s2_475 = __p2_475; \ + __ret_475 = __s0_475 - __s1_475 * splatq_laneq_f32(__s2_475, __p3_475); \ + __ret_475; \ +}) +#else +#define vmlsq_laneq_f32(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \ + float32x4_t __ret_476; \ + float32x4_t __s0_476 = __p0_476; \ + float32x4_t __s1_476 = __p1_476; \ + float32x4_t __s2_476 = __p2_476; \ + float32x4_t __rev0_476; __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 3, 2, 1, 0); \ + float32x4_t __rev1_476; __rev1_476 = __builtin_shufflevector(__s1_476, __s1_476, 3, 2, 1, 0); \ + float32x4_t __rev2_476; __rev2_476 = __builtin_shufflevector(__s2_476, __s2_476, 3, 2, 1, 0); \ + __ret_476 = __rev0_476 - __rev1_476 * __noswap_splatq_laneq_f32(__rev2_476, __p3_476); \ + __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 3, 2, 1, 0); \ + __ret_476; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \ + int32x4_t __ret_477; \ + int32x4_t __s0_477 = __p0_477; \ + int32x4_t __s1_477 = __p1_477; \ + int32x4_t __s2_477 = __p2_477; \ + __ret_477 = __s0_477 - __s1_477 * splatq_laneq_s32(__s2_477, __p3_477); \ + 
__ret_477; \ +}) +#else +#define vmlsq_laneq_s32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \ + int32x4_t __ret_478; \ + int32x4_t __s0_478 = __p0_478; \ + int32x4_t __s1_478 = __p1_478; \ + int32x4_t __s2_478 = __p2_478; \ + int32x4_t __rev0_478; __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \ + int32x4_t __rev1_478; __rev1_478 = __builtin_shufflevector(__s1_478, __s1_478, 3, 2, 1, 0); \ + int32x4_t __rev2_478; __rev2_478 = __builtin_shufflevector(__s2_478, __s2_478, 3, 2, 1, 0); \ + __ret_478 = __rev0_478 - __rev1_478 * __noswap_splatq_laneq_s32(__rev2_478, __p3_478); \ + __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \ + __ret_478; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \ + int16x8_t __ret_479; \ + int16x8_t __s0_479 = __p0_479; \ + int16x8_t __s1_479 = __p1_479; \ + int16x8_t __s2_479 = __p2_479; \ + __ret_479 = __s0_479 - __s1_479 * splatq_laneq_s16(__s2_479, __p3_479); \ + __ret_479; \ +}) +#else +#define vmlsq_laneq_s16(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \ + int16x8_t __ret_480; \ + int16x8_t __s0_480 = __p0_480; \ + int16x8_t __s1_480 = __p1_480; \ + int16x8_t __s2_480 = __p2_480; \ + int16x8_t __rev0_480; __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_480; __rev1_480 = __builtin_shufflevector(__s1_480, __s1_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_480; __rev2_480 = __builtin_shufflevector(__s2_480, __s2_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_480 = __rev0_480 - __rev1_480 * __noswap_splatq_laneq_s16(__rev2_480, __p3_480); \ + __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_480; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \ + uint32x2_t __ret_481; \ + uint32x2_t __s0_481 = __p0_481; \ + uint32x2_t __s1_481 = __p1_481; \ + uint32x4_t __s2_481 = __p2_481; \ + __ret_481 = __s0_481 - __s1_481 * splat_laneq_u32(__s2_481, __p3_481); \ + __ret_481; \ +}) +#else +#define vmls_laneq_u32(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \ + uint32x2_t __ret_482; \ + uint32x2_t __s0_482 = __p0_482; \ + uint32x2_t __s1_482 = __p1_482; \ + uint32x4_t __s2_482 = __p2_482; \ + uint32x2_t __rev0_482; __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \ + uint32x2_t __rev1_482; __rev1_482 = __builtin_shufflevector(__s1_482, __s1_482, 1, 0); \ + uint32x4_t __rev2_482; __rev2_482 = __builtin_shufflevector(__s2_482, __s2_482, 3, 2, 1, 0); \ + __ret_482 = __rev0_482 - __rev1_482 * __noswap_splat_laneq_u32(__rev2_482, __p3_482); \ + __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 1, 0); \ + __ret_482; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \ + uint16x4_t __ret_483; \ + uint16x4_t __s0_483 = __p0_483; \ + uint16x4_t __s1_483 = __p1_483; \ + uint16x8_t __s2_483 = __p2_483; \ + __ret_483 = __s0_483 - __s1_483 * splat_laneq_u16(__s2_483, __p3_483); \ + __ret_483; \ +}) +#else +#define vmls_laneq_u16(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \ + uint16x4_t __ret_484; \ + uint16x4_t __s0_484 = __p0_484; \ + uint16x4_t __s1_484 = __p1_484; \ + uint16x8_t __s2_484 = __p2_484; \ + uint16x4_t __rev0_484; __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \ + uint16x4_t __rev1_484; __rev1_484 = 
__builtin_shufflevector(__s1_484, __s1_484, 3, 2, 1, 0); \ + uint16x8_t __rev2_484; __rev2_484 = __builtin_shufflevector(__s2_484, __s2_484, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_484 = __rev0_484 - __rev1_484 * __noswap_splat_laneq_u16(__rev2_484, __p3_484); \ + __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 3, 2, 1, 0); \ + __ret_484; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_f32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \ + float32x2_t __ret_485; \ + float32x2_t __s0_485 = __p0_485; \ + float32x2_t __s1_485 = __p1_485; \ + float32x4_t __s2_485 = __p2_485; \ + __ret_485 = __s0_485 - __s1_485 * splat_laneq_f32(__s2_485, __p3_485); \ + __ret_485; \ +}) +#else +#define vmls_laneq_f32(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \ + float32x2_t __ret_486; \ + float32x2_t __s0_486 = __p0_486; \ + float32x2_t __s1_486 = __p1_486; \ + float32x4_t __s2_486 = __p2_486; \ + float32x2_t __rev0_486; __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 1, 0); \ + float32x2_t __rev1_486; __rev1_486 = __builtin_shufflevector(__s1_486, __s1_486, 1, 0); \ + float32x4_t __rev2_486; __rev2_486 = __builtin_shufflevector(__s2_486, __s2_486, 3, 2, 1, 0); \ + __ret_486 = __rev0_486 - __rev1_486 * __noswap_splat_laneq_f32(__rev2_486, __p3_486); \ + __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 1, 0); \ + __ret_486; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s32(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \ + int32x2_t __ret_487; \ + int32x2_t __s0_487 = __p0_487; \ + int32x2_t __s1_487 = __p1_487; \ + int32x4_t __s2_487 = __p2_487; \ + __ret_487 = __s0_487 - __s1_487 * splat_laneq_s32(__s2_487, __p3_487); \ + __ret_487; \ +}) +#else +#define vmls_laneq_s32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \ + int32x2_t __ret_488; \ + int32x2_t __s0_488 = __p0_488; \ + int32x2_t __s1_488 = __p1_488; \ + int32x4_t __s2_488 = __p2_488; \ + int32x2_t __rev0_488; __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \ + int32x2_t __rev1_488; __rev1_488 = __builtin_shufflevector(__s1_488, __s1_488, 1, 0); \ + int32x4_t __rev2_488; __rev2_488 = __builtin_shufflevector(__s2_488, __s2_488, 3, 2, 1, 0); \ + __ret_488 = __rev0_488 - __rev1_488 * __noswap_splat_laneq_s32(__rev2_488, __p3_488); \ + __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \ + __ret_488; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s16(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \ + int16x4_t __ret_489; \ + int16x4_t __s0_489 = __p0_489; \ + int16x4_t __s1_489 = __p1_489; \ + int16x8_t __s2_489 = __p2_489; \ + __ret_489 = __s0_489 - __s1_489 * splat_laneq_s16(__s2_489, __p3_489); \ + __ret_489; \ +}) +#else +#define vmls_laneq_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \ + int16x4_t __ret_490; \ + int16x4_t __s0_490 = __p0_490; \ + int16x4_t __s1_490 = __p1_490; \ + int16x8_t __s2_490 = __p2_490; \ + int16x4_t __rev0_490; __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \ + int16x4_t __rev1_490; __rev1_490 = __builtin_shufflevector(__s1_490, __s1_490, 3, 2, 1, 0); \ + int16x8_t __rev2_490; __rev2_490 = __builtin_shufflevector(__s2_490, __s2_490, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_490 = __rev0_490 - __rev1_490 * __noswap_splat_laneq_s16(__rev2_490, __p3_490); \ + __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \ + __ret_490; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u32(__p0_491, __p1_491, __p2_491, 
__p3_491) __extension__ ({ \ + uint64x2_t __ret_491; \ + uint64x2_t __s0_491 = __p0_491; \ + uint32x4_t __s1_491 = __p1_491; \ + uint32x2_t __s2_491 = __p2_491; \ + __ret_491 = __s0_491 - vmull_u32(vget_high_u32(__s1_491), splat_lane_u32(__s2_491, __p3_491)); \ + __ret_491; \ +}) +#else +#define vmlsl_high_lane_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \ + uint64x2_t __ret_492; \ + uint64x2_t __s0_492 = __p0_492; \ + uint32x4_t __s1_492 = __p1_492; \ + uint32x2_t __s2_492 = __p2_492; \ + uint64x2_t __rev0_492; __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \ + uint32x4_t __rev1_492; __rev1_492 = __builtin_shufflevector(__s1_492, __s1_492, 3, 2, 1, 0); \ + uint32x2_t __rev2_492; __rev2_492 = __builtin_shufflevector(__s2_492, __s2_492, 1, 0); \ + __ret_492 = __rev0_492 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_492), __noswap_splat_lane_u32(__rev2_492, __p3_492)); \ + __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 1, 0); \ + __ret_492; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u16(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \ + uint32x4_t __ret_493; \ + uint32x4_t __s0_493 = __p0_493; \ + uint16x8_t __s1_493 = __p1_493; \ + uint16x4_t __s2_493 = __p2_493; \ + __ret_493 = __s0_493 - vmull_u16(vget_high_u16(__s1_493), splat_lane_u16(__s2_493, __p3_493)); \ + __ret_493; \ +}) +#else +#define vmlsl_high_lane_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \ + uint32x4_t __ret_494; \ + uint32x4_t __s0_494 = __p0_494; \ + uint16x8_t __s1_494 = __p1_494; \ + uint16x4_t __s2_494 = __p2_494; \ + uint32x4_t __rev0_494; __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 3, 2, 1, 0); \ + uint16x8_t __rev1_494; __rev1_494 = __builtin_shufflevector(__s1_494, __s1_494, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_494; __rev2_494 = __builtin_shufflevector(__s2_494, __s2_494, 3, 2, 1, 0); \ + __ret_494 = __rev0_494 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_494), __noswap_splat_lane_u16(__rev2_494, __p3_494)); \ + __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \ + __ret_494; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_s32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \ + int64x2_t __ret_495; \ + int64x2_t __s0_495 = __p0_495; \ + int32x4_t __s1_495 = __p1_495; \ + int32x2_t __s2_495 = __p2_495; \ + __ret_495 = __s0_495 - vmull_s32(vget_high_s32(__s1_495), splat_lane_s32(__s2_495, __p3_495)); \ + __ret_495; \ +}) +#else +#define vmlsl_high_lane_s32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \ + int64x2_t __ret_496; \ + int64x2_t __s0_496 = __p0_496; \ + int32x4_t __s1_496 = __p1_496; \ + int32x2_t __s2_496 = __p2_496; \ + int64x2_t __rev0_496; __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 1, 0); \ + int32x4_t __rev1_496; __rev1_496 = __builtin_shufflevector(__s1_496, __s1_496, 3, 2, 1, 0); \ + int32x2_t __rev2_496; __rev2_496 = __builtin_shufflevector(__s2_496, __s2_496, 1, 0); \ + __ret_496 = __rev0_496 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_496), __noswap_splat_lane_s32(__rev2_496, __p3_496)); \ + __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \ + __ret_496; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_s16(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \ + int32x4_t __ret_497; \ + int32x4_t __s0_497 = __p0_497; \ + int16x8_t __s1_497 = __p1_497; \ + int16x4_t __s2_497 = __p2_497; \ + __ret_497 = __s0_497 - vmull_s16(vget_high_s16(__s1_497), 
splat_lane_s16(__s2_497, __p3_497)); \ + __ret_497; \ +}) +#else +#define vmlsl_high_lane_s16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \ + int32x4_t __ret_498; \ + int32x4_t __s0_498 = __p0_498; \ + int16x8_t __s1_498 = __p1_498; \ + int16x4_t __s2_498 = __p2_498; \ + int32x4_t __rev0_498; __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 3, 2, 1, 0); \ + int16x8_t __rev1_498; __rev1_498 = __builtin_shufflevector(__s1_498, __s1_498, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_498; __rev2_498 = __builtin_shufflevector(__s2_498, __s2_498, 3, 2, 1, 0); \ + __ret_498 = __rev0_498 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_498), __noswap_splat_lane_s16(__rev2_498, __p3_498)); \ + __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \ + __ret_498; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \ + uint64x2_t __ret_499; \ + uint64x2_t __s0_499 = __p0_499; \ + uint32x4_t __s1_499 = __p1_499; \ + uint32x4_t __s2_499 = __p2_499; \ + __ret_499 = __s0_499 - vmull_u32(vget_high_u32(__s1_499), splat_laneq_u32(__s2_499, __p3_499)); \ + __ret_499; \ +}) +#else +#define vmlsl_high_laneq_u32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \ + uint64x2_t __ret_500; \ + uint64x2_t __s0_500 = __p0_500; \ + uint32x4_t __s1_500 = __p1_500; \ + uint32x4_t __s2_500 = __p2_500; \ + uint64x2_t __rev0_500; __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 1, 0); \ + uint32x4_t __rev1_500; __rev1_500 = __builtin_shufflevector(__s1_500, __s1_500, 3, 2, 1, 0); \ + uint32x4_t __rev2_500; __rev2_500 = __builtin_shufflevector(__s2_500, __s2_500, 3, 2, 1, 0); \ + __ret_500 = __rev0_500 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_500), __noswap_splat_laneq_u32(__rev2_500, __p3_500)); \ + __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \ + __ret_500; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \ + uint32x4_t __ret_501; \ + uint32x4_t __s0_501 = __p0_501; \ + uint16x8_t __s1_501 = __p1_501; \ + uint16x8_t __s2_501 = __p2_501; \ + __ret_501 = __s0_501 - vmull_u16(vget_high_u16(__s1_501), splat_laneq_u16(__s2_501, __p3_501)); \ + __ret_501; \ +}) +#else +#define vmlsl_high_laneq_u16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \ + uint32x4_t __ret_502; \ + uint32x4_t __s0_502 = __p0_502; \ + uint16x8_t __s1_502 = __p1_502; \ + uint16x8_t __s2_502 = __p2_502; \ + uint32x4_t __rev0_502; __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 3, 2, 1, 0); \ + uint16x8_t __rev1_502; __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_502; __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_502 = __rev0_502 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_502), __noswap_splat_laneq_u16(__rev2_502, __p3_502)); \ + __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \ + __ret_502; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \ + int64x2_t __ret_503; \ + int64x2_t __s0_503 = __p0_503; \ + int32x4_t __s1_503 = __p1_503; \ + int32x4_t __s2_503 = __p2_503; \ + __ret_503 = __s0_503 - vmull_s32(vget_high_s32(__s1_503), splat_laneq_s32(__s2_503, __p3_503)); \ + __ret_503; \ +}) +#else +#define vmlsl_high_laneq_s32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \ + int64x2_t 
__ret_504; \ + int64x2_t __s0_504 = __p0_504; \ + int32x4_t __s1_504 = __p1_504; \ + int32x4_t __s2_504 = __p2_504; \ + int64x2_t __rev0_504; __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 1, 0); \ + int32x4_t __rev1_504; __rev1_504 = __builtin_shufflevector(__s1_504, __s1_504, 3, 2, 1, 0); \ + int32x4_t __rev2_504; __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 3, 2, 1, 0); \ + __ret_504 = __rev0_504 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_504), __noswap_splat_laneq_s32(__rev2_504, __p3_504)); \ + __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \ + __ret_504; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \ + int32x4_t __ret_505; \ + int32x4_t __s0_505 = __p0_505; \ + int16x8_t __s1_505 = __p1_505; \ + int16x8_t __s2_505 = __p2_505; \ + __ret_505 = __s0_505 - vmull_s16(vget_high_s16(__s1_505), splat_laneq_s16(__s2_505, __p3_505)); \ + __ret_505; \ +}) +#else +#define vmlsl_high_laneq_s16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \ + int32x4_t __ret_506; \ + int32x4_t __s0_506 = __p0_506; \ + int16x8_t __s1_506 = __p1_506; \ + int16x8_t __s2_506 = __p2_506; \ + int32x4_t __rev0_506; __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 3, 2, 1, 0); \ + int16x8_t __rev1_506; __rev1_506 = __builtin_shufflevector(__s1_506, __s1_506, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_506; __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_506 = __rev0_506 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_506), __noswap_splat_laneq_s16(__rev2_506, __p3_506)); \ + __ret_506 = __builtin_shufflevector(__ret_506, __ret_506, 3, 2, 1, 0); \ + __ret_506; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \ + uint64x2_t __ret_507; \ + uint64x2_t __s0_507 = __p0_507; \ + uint32x2_t __s1_507 = __p1_507; \ + uint32x4_t __s2_507 = __p2_507; \ + __ret_507 = __s0_507 - vmull_u32(__s1_507, splat_laneq_u32(__s2_507, __p3_507)); \ + __ret_507; \ +}) +#else +#define vmlsl_laneq_u32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \ + uint64x2_t __ret_508; \ + uint64x2_t __s0_508 = __p0_508; \ + uint32x2_t __s1_508 = __p1_508; \ + uint32x4_t __s2_508 = __p2_508; \ + uint64x2_t __rev0_508; __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \ + uint32x2_t __rev1_508; __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \ + uint32x4_t __rev2_508; __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 3, 2, 1, 0); \ + __ret_508 = __rev0_508 - __noswap_vmull_u32(__rev1_508, __noswap_splat_laneq_u32(__rev2_508, __p3_508)); \ + __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \ + __ret_508; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u16(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ + uint32x4_t __ret_509; \ + uint32x4_t __s0_509 = __p0_509; \ + uint16x4_t __s1_509 = __p1_509; \ + uint16x8_t __s2_509 = __p2_509; \ + __ret_509 = __s0_509 - vmull_u16(__s1_509, splat_laneq_u16(__s2_509, __p3_509)); \ + __ret_509; \ +}) +#else +#define vmlsl_laneq_u16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ + uint32x4_t __ret_510; \ + uint32x4_t __s0_510 = __p0_510; \ + uint16x4_t __s1_510 = __p1_510; \ + uint16x8_t __s2_510 = __p2_510; \ + uint32x4_t __rev0_510; __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \ + uint16x4_t __rev1_510; __rev1_510 = 
__builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \ + uint16x8_t __rev2_510; __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_510 = __rev0_510 - __noswap_vmull_u16(__rev1_510, __noswap_splat_laneq_u16(__rev2_510, __p3_510)); \ + __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \ + __ret_510; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ + int64x2_t __ret_511; \ + int64x2_t __s0_511 = __p0_511; \ + int32x2_t __s1_511 = __p1_511; \ + int32x4_t __s2_511 = __p2_511; \ + __ret_511 = __s0_511 - vmull_s32(__s1_511, splat_laneq_s32(__s2_511, __p3_511)); \ + __ret_511; \ +}) +#else +#define vmlsl_laneq_s32(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ + int64x2_t __ret_512; \ + int64x2_t __s0_512 = __p0_512; \ + int32x2_t __s1_512 = __p1_512; \ + int32x4_t __s2_512 = __p2_512; \ + int64x2_t __rev0_512; __rev0_512 = __builtin_shufflevector(__s0_512, __s0_512, 1, 0); \ + int32x2_t __rev1_512; __rev1_512 = __builtin_shufflevector(__s1_512, __s1_512, 1, 0); \ + int32x4_t __rev2_512; __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 3, 2, 1, 0); \ + __ret_512 = __rev0_512 - __noswap_vmull_s32(__rev1_512, __noswap_splat_laneq_s32(__rev2_512, __p3_512)); \ + __ret_512 = __builtin_shufflevector(__ret_512, __ret_512, 1, 0); \ + __ret_512; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s16(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ + int32x4_t __ret_513; \ + int32x4_t __s0_513 = __p0_513; \ + int16x4_t __s1_513 = __p1_513; \ + int16x8_t __s2_513 = __p2_513; \ + __ret_513 = __s0_513 - vmull_s16(__s1_513, splat_laneq_s16(__s2_513, __p3_513)); \ + __ret_513; \ +}) +#else +#define vmlsl_laneq_s16(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ + int32x4_t __ret_514; \ + int32x4_t __s0_514 = __p0_514; \ + int16x4_t __s1_514 = __p1_514; \ + int16x8_t __s2_514 = __p2_514; \ + int32x4_t __rev0_514; __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 3, 2, 1, 0); \ + int16x4_t __rev1_514; __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 3, 2, 1, 0); \ + int16x8_t __rev2_514; __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_514 = __rev0_514 - __noswap_vmull_s16(__rev1_514, __noswap_splat_laneq_s16(__rev2_514, __p3_514)); \ + __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 3, 2, 1, 0); \ + __ret_514; \ +}) +#endif + +__ai poly64x1_t vmov_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmov_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_515) { + uint16x8_t __ret_515; + uint8x8_t 
__a1_515 = vget_high_u8(__p0_515); + __ret_515 = (uint16x8_t)(vshll_n_u8(__a1_515, 0)); + return __ret_515; +} +#else +__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_516) { + uint16x8_t __ret_516; + uint8x16_t __rev0_516; __rev0_516 = __builtin_shufflevector(__p0_516, __p0_516, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __a1_516 = __noswap_vget_high_u8(__rev0_516); + __ret_516 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_516, 0)); + __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_516; +} +__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_517) { + uint16x8_t __ret_517; + uint8x8_t __a1_517 = __noswap_vget_high_u8(__p0_517); + __ret_517 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_517, 0)); + return __ret_517; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_518) { + uint64x2_t __ret_518; + uint32x2_t __a1_518 = vget_high_u32(__p0_518); + __ret_518 = (uint64x2_t)(vshll_n_u32(__a1_518, 0)); + return __ret_518; +} +#else +__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_519) { + uint64x2_t __ret_519; + uint32x4_t __rev0_519; __rev0_519 = __builtin_shufflevector(__p0_519, __p0_519, 3, 2, 1, 0); + uint32x2_t __a1_519 = __noswap_vget_high_u32(__rev0_519); + __ret_519 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_519, 0)); + __ret_519 = __builtin_shufflevector(__ret_519, __ret_519, 1, 0); + return __ret_519; +} +__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_520) { + uint64x2_t __ret_520; + uint32x2_t __a1_520 = __noswap_vget_high_u32(__p0_520); + __ret_520 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_520, 0)); + return __ret_520; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_521) { + uint32x4_t __ret_521; + uint16x4_t __a1_521 = vget_high_u16(__p0_521); + __ret_521 = (uint32x4_t)(vshll_n_u16(__a1_521, 0)); + return __ret_521; +} +#else +__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_522) { + uint32x4_t __ret_522; + uint16x8_t __rev0_522; __rev0_522 = __builtin_shufflevector(__p0_522, __p0_522, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __a1_522 = __noswap_vget_high_u16(__rev0_522); + __ret_522 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_522, 0)); + __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); + return __ret_522; +} +__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_523) { + uint32x4_t __ret_523; + uint16x4_t __a1_523 = __noswap_vget_high_u16(__p0_523); + __ret_523 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_523, 0)); + return __ret_523; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovl_high_s8(int8x16_t __p0_524) { + int16x8_t __ret_524; + int8x8_t __a1_524 = vget_high_s8(__p0_524); + __ret_524 = (int16x8_t)(vshll_n_s8(__a1_524, 0)); + return __ret_524; +} +#else +__ai int16x8_t vmovl_high_s8(int8x16_t __p0_525) { + int16x8_t __ret_525; + int8x16_t __rev0_525; __rev0_525 = __builtin_shufflevector(__p0_525, __p0_525, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __a1_525 = __noswap_vget_high_s8(__rev0_525); + __ret_525 = (int16x8_t)(__noswap_vshll_n_s8(__a1_525, 0)); + __ret_525 = __builtin_shufflevector(__ret_525, __ret_525, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_525; +} +__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_526) { + int16x8_t __ret_526; + int8x8_t __a1_526 = __noswap_vget_high_s8(__p0_526); + __ret_526 = (int16x8_t)(__noswap_vshll_n_s8(__a1_526, 0)); + return __ret_526; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovl_high_s32(int32x4_t __p0_527) { + int64x2_t __ret_527; + 
int32x2_t __a1_527 = vget_high_s32(__p0_527); + __ret_527 = (int64x2_t)(vshll_n_s32(__a1_527, 0)); + return __ret_527; +} +#else +__ai int64x2_t vmovl_high_s32(int32x4_t __p0_528) { + int64x2_t __ret_528; + int32x4_t __rev0_528; __rev0_528 = __builtin_shufflevector(__p0_528, __p0_528, 3, 2, 1, 0); + int32x2_t __a1_528 = __noswap_vget_high_s32(__rev0_528); + __ret_528 = (int64x2_t)(__noswap_vshll_n_s32(__a1_528, 0)); + __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 1, 0); + return __ret_528; +} +__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_529) { + int64x2_t __ret_529; + int32x2_t __a1_529 = __noswap_vget_high_s32(__p0_529); + __ret_529 = (int64x2_t)(__noswap_vshll_n_s32(__a1_529, 0)); + return __ret_529; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_530) { + int32x4_t __ret_530; + int16x4_t __a1_530 = vget_high_s16(__p0_530); + __ret_530 = (int32x4_t)(vshll_n_s16(__a1_530, 0)); + return __ret_530; +} +#else +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_531) { + int32x4_t __ret_531; + int16x8_t __rev0_531; __rev0_531 = __builtin_shufflevector(__p0_531, __p0_531, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __a1_531 = __noswap_vget_high_s16(__rev0_531); + __ret_531 = (int32x4_t)(__noswap_vshll_n_s16(__a1_531, 0)); + __ret_531 = __builtin_shufflevector(__ret_531, __ret_531, 3, 2, 1, 0); + return __ret_531; +} +__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_532) { + int32x4_t __ret_532; + int16x4_t __a1_532 = __noswap_vget_high_s16(__p0_532); + __ret_532 = (int32x4_t)(__noswap_vshll_n_s16(__a1_532, 0)); + return __ret_532; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vmovn_u32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vmovn_u64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vmovn_u16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + 
int16x8_t __ret; + __ret = vcombine_s16(__p0, vmovn_s32(__p1)); + return __ret; +} +#else +__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vmovn_s64(__p1)); + return __ret; +} +#else +__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vmovn_s16(__p1)); + return __ret; +} +#else +__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#define vmuld_lane_f64(__p0_533, __p1_533, __p2_533) __extension__ ({ \ + float64_t __ret_533; \ + float64_t __s0_533 = __p0_533; \ + float64x1_t __s1_533 = __p1_533; \ + __ret_533 = __s0_533 * vget_lane_f64(__s1_533, __p2_533); \ + __ret_533; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmuls_lane_f32(__p0_534, __p1_534, __p2_534) __extension__ ({ \ + float32_t __ret_534; \ + float32_t __s0_534 = __p0_534; \ + float32x2_t __s1_534 = __p1_534; \ + __ret_534 = __s0_534 * vget_lane_f32(__s1_534, __p2_534); \ + __ret_534; \ +}) +#else +#define vmuls_lane_f32(__p0_535, __p1_535, __p2_535) __extension__ ({ \ + float32_t __ret_535; \ + float32_t __s0_535 = __p0_535; \ + float32x2_t __s1_535 = __p1_535; \ + float32x2_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 1, 0); \ + __ret_535 = __s0_535 * __noswap_vget_lane_f32(__rev1_535, __p2_535); \ + __ret_535; \ +}) +#endif + +#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define 
vmulq_lane_f64(__p0_536, __p1_536, __p2_536) __extension__ ({ \ + float64x2_t __ret_536; \ + float64x2_t __s0_536 = __p0_536; \ + float64x1_t __s1_536 = __p1_536; \ + __ret_536 = __s0_536 * splatq_lane_f64(__s1_536, __p2_536); \ + __ret_536; \ +}) +#else +#define vmulq_lane_f64(__p0_537, __p1_537, __p2_537) __extension__ ({ \ + float64x2_t __ret_537; \ + float64x2_t __s0_537 = __p0_537; \ + float64x1_t __s1_537 = __p1_537; \ + float64x2_t __rev0_537; __rev0_537 = __builtin_shufflevector(__s0_537, __s0_537, 1, 0); \ + __ret_537 = __rev0_537 * __noswap_splatq_lane_f64(__s1_537, __p2_537); \ + __ret_537 = __builtin_shufflevector(__ret_537, __ret_537, 1, 0); \ + __ret_537; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuld_laneq_f64(__p0_538, __p1_538, __p2_538) __extension__ ({ \ + float64_t __ret_538; \ + float64_t __s0_538 = __p0_538; \ + float64x2_t __s1_538 = __p1_538; \ + __ret_538 = __s0_538 * vgetq_lane_f64(__s1_538, __p2_538); \ + __ret_538; \ +}) +#else +#define vmuld_laneq_f64(__p0_539, __p1_539, __p2_539) __extension__ ({ \ + float64_t __ret_539; \ + float64_t __s0_539 = __p0_539; \ + float64x2_t __s1_539 = __p1_539; \ + float64x2_t __rev1_539; __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, 1, 0); \ + __ret_539 = __s0_539 * __noswap_vgetq_lane_f64(__rev1_539, __p2_539); \ + __ret_539; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuls_laneq_f32(__p0_540, __p1_540, __p2_540) __extension__ ({ \ + float32_t __ret_540; \ + float32_t __s0_540 = __p0_540; \ + float32x4_t __s1_540 = __p1_540; \ + __ret_540 = __s0_540 * vgetq_lane_f32(__s1_540, __p2_540); \ + __ret_540; \ +}) +#else +#define vmuls_laneq_f32(__p0_541, __p1_541, __p2_541) __extension__ ({ \ + float32_t __ret_541; \ + float32_t __s0_541 = __p0_541; \ + float32x4_t __s1_541 = __p1_541; \ + float32x4_t __rev1_541; __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, 3, 2, 1, 0); \ + __ret_541 = __s0_541 * __noswap_vgetq_lane_f32(__rev1_541, __p2_541); \ + __ret_541; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ + __ret; \ +}) +#else +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u32(__p0_542, __p1_542, __p2_542) __extension__ ({ \ + uint32x4_t __ret_542; \ + uint32x4_t __s0_542 = __p0_542; \ + uint32x4_t __s1_542 = __p1_542; \ + __ret_542 = __s0_542 * splatq_laneq_u32(__s1_542, __p2_542); \ + __ret_542; \ +}) +#else +#define vmulq_laneq_u32(__p0_543, __p1_543, __p2_543) __extension__ ({ \ + uint32x4_t __ret_543; \ + uint32x4_t __s0_543 = __p0_543; \ + uint32x4_t __s1_543 = __p1_543; \ + uint32x4_t __rev0_543; __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, 3, 2, 1, 0); \ + uint32x4_t __rev1_543; __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, 3, 2, 1, 0); \ + __ret_543 = __rev0_543 * __noswap_splatq_laneq_u32(__rev1_543, __p2_543); \ + __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 3, 2, 1, 0); \ + __ret_543; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u16(__p0_544, __p1_544, __p2_544) 
__extension__ ({ \ + uint16x8_t __ret_544; \ + uint16x8_t __s0_544 = __p0_544; \ + uint16x8_t __s1_544 = __p1_544; \ + __ret_544 = __s0_544 * splatq_laneq_u16(__s1_544, __p2_544); \ + __ret_544; \ +}) +#else +#define vmulq_laneq_u16(__p0_545, __p1_545, __p2_545) __extension__ ({ \ + uint16x8_t __ret_545; \ + uint16x8_t __s0_545 = __p0_545; \ + uint16x8_t __s1_545 = __p1_545; \ + uint16x8_t __rev0_545; __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_545; __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_545 = __rev0_545 * __noswap_splatq_laneq_u16(__rev1_545, __p2_545); \ + __ret_545 = __builtin_shufflevector(__ret_545, __ret_545, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_545; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f64(__p0_546, __p1_546, __p2_546) __extension__ ({ \ + float64x2_t __ret_546; \ + float64x2_t __s0_546 = __p0_546; \ + float64x2_t __s1_546 = __p1_546; \ + __ret_546 = __s0_546 * splatq_laneq_f64(__s1_546, __p2_546); \ + __ret_546; \ +}) +#else +#define vmulq_laneq_f64(__p0_547, __p1_547, __p2_547) __extension__ ({ \ + float64x2_t __ret_547; \ + float64x2_t __s0_547 = __p0_547; \ + float64x2_t __s1_547 = __p1_547; \ + float64x2_t __rev0_547; __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, 1, 0); \ + float64x2_t __rev1_547; __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, 1, 0); \ + __ret_547 = __rev0_547 * __noswap_splatq_laneq_f64(__rev1_547, __p2_547); \ + __ret_547 = __builtin_shufflevector(__ret_547, __ret_547, 1, 0); \ + __ret_547; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f32(__p0_548, __p1_548, __p2_548) __extension__ ({ \ + float32x4_t __ret_548; \ + float32x4_t __s0_548 = __p0_548; \ + float32x4_t __s1_548 = __p1_548; \ + __ret_548 = __s0_548 * splatq_laneq_f32(__s1_548, __p2_548); \ + __ret_548; \ +}) +#else +#define vmulq_laneq_f32(__p0_549, __p1_549, __p2_549) __extension__ ({ \ + float32x4_t __ret_549; \ + float32x4_t __s0_549 = __p0_549; \ + float32x4_t __s1_549 = __p1_549; \ + float32x4_t __rev0_549; __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, 3, 2, 1, 0); \ + float32x4_t __rev1_549; __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, 3, 2, 1, 0); \ + __ret_549 = __rev0_549 * __noswap_splatq_laneq_f32(__rev1_549, __p2_549); \ + __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 3, 2, 1, 0); \ + __ret_549; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s32(__p0_550, __p1_550, __p2_550) __extension__ ({ \ + int32x4_t __ret_550; \ + int32x4_t __s0_550 = __p0_550; \ + int32x4_t __s1_550 = __p1_550; \ + __ret_550 = __s0_550 * splatq_laneq_s32(__s1_550, __p2_550); \ + __ret_550; \ +}) +#else +#define vmulq_laneq_s32(__p0_551, __p1_551, __p2_551) __extension__ ({ \ + int32x4_t __ret_551; \ + int32x4_t __s0_551 = __p0_551; \ + int32x4_t __s1_551 = __p1_551; \ + int32x4_t __rev0_551; __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, 3, 2, 1, 0); \ + int32x4_t __rev1_551; __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, 3, 2, 1, 0); \ + __ret_551 = __rev0_551 * __noswap_splatq_laneq_s32(__rev1_551, __p2_551); \ + __ret_551 = __builtin_shufflevector(__ret_551, __ret_551, 3, 2, 1, 0); \ + __ret_551; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s16(__p0_552, __p1_552, __p2_552) __extension__ ({ \ + int16x8_t __ret_552; \ + int16x8_t __s0_552 = __p0_552; \ + int16x8_t __s1_552 = __p1_552; \ + __ret_552 = __s0_552 * splatq_laneq_s16(__s1_552, __p2_552); \ + 
__ret_552; \ +}) +#else +#define vmulq_laneq_s16(__p0_553, __p1_553, __p2_553) __extension__ ({ \ + int16x8_t __ret_553; \ + int16x8_t __s0_553 = __p0_553; \ + int16x8_t __s1_553 = __p1_553; \ + int16x8_t __rev0_553; __rev0_553 = __builtin_shufflevector(__s0_553, __s0_553, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_553; __rev1_553 = __builtin_shufflevector(__s1_553, __s1_553, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_553 = __rev0_553 * __noswap_splatq_laneq_s16(__rev1_553, __p2_553); \ + __ret_553 = __builtin_shufflevector(__ret_553, __ret_553, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_553; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u32(__p0_554, __p1_554, __p2_554) __extension__ ({ \ + uint32x2_t __ret_554; \ + uint32x2_t __s0_554 = __p0_554; \ + uint32x4_t __s1_554 = __p1_554; \ + __ret_554 = __s0_554 * splat_laneq_u32(__s1_554, __p2_554); \ + __ret_554; \ +}) +#else +#define vmul_laneq_u32(__p0_555, __p1_555, __p2_555) __extension__ ({ \ + uint32x2_t __ret_555; \ + uint32x2_t __s0_555 = __p0_555; \ + uint32x4_t __s1_555 = __p1_555; \ + uint32x2_t __rev0_555; __rev0_555 = __builtin_shufflevector(__s0_555, __s0_555, 1, 0); \ + uint32x4_t __rev1_555; __rev1_555 = __builtin_shufflevector(__s1_555, __s1_555, 3, 2, 1, 0); \ + __ret_555 = __rev0_555 * __noswap_splat_laneq_u32(__rev1_555, __p2_555); \ + __ret_555 = __builtin_shufflevector(__ret_555, __ret_555, 1, 0); \ + __ret_555; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u16(__p0_556, __p1_556, __p2_556) __extension__ ({ \ + uint16x4_t __ret_556; \ + uint16x4_t __s0_556 = __p0_556; \ + uint16x8_t __s1_556 = __p1_556; \ + __ret_556 = __s0_556 * splat_laneq_u16(__s1_556, __p2_556); \ + __ret_556; \ +}) +#else +#define vmul_laneq_u16(__p0_557, __p1_557, __p2_557) __extension__ ({ \ + uint16x4_t __ret_557; \ + uint16x4_t __s0_557 = __p0_557; \ + uint16x8_t __s1_557 = __p1_557; \ + uint16x4_t __rev0_557; __rev0_557 = __builtin_shufflevector(__s0_557, __s0_557, 3, 2, 1, 0); \ + uint16x8_t __rev1_557; __rev1_557 = __builtin_shufflevector(__s1_557, __s1_557, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_557 = __rev0_557 * __noswap_splat_laneq_u16(__rev1_557, __p2_557); \ + __ret_557 = __builtin_shufflevector(__ret_557, __ret_557, 3, 2, 1, 0); \ + __ret_557; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f32(__p0_558, __p1_558, __p2_558) __extension__ ({ \ + float32x2_t __ret_558; \ + float32x2_t __s0_558 = __p0_558; \ + float32x4_t __s1_558 = __p1_558; \ + __ret_558 = __s0_558 * splat_laneq_f32(__s1_558, __p2_558); \ + __ret_558; \ +}) +#else +#define vmul_laneq_f32(__p0_559, __p1_559, __p2_559) __extension__ ({ \ + float32x2_t __ret_559; \ + float32x2_t __s0_559 = __p0_559; \ + float32x4_t __s1_559 = __p1_559; \ + float32x2_t __rev0_559; __rev0_559 = __builtin_shufflevector(__s0_559, __s0_559, 1, 0); \ + float32x4_t __rev1_559; __rev1_559 = __builtin_shufflevector(__s1_559, __s1_559, 3, 2, 1, 0); \ + __ret_559 = __rev0_559 * __noswap_splat_laneq_f32(__rev1_559, __p2_559); \ + __ret_559 = __builtin_shufflevector(__ret_559, __ret_559, 1, 0); \ + __ret_559; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s32(__p0_560, __p1_560, __p2_560) __extension__ ({ \ + int32x2_t __ret_560; \ + int32x2_t __s0_560 = __p0_560; \ + int32x4_t __s1_560 = __p1_560; \ + __ret_560 = __s0_560 * splat_laneq_s32(__s1_560, __p2_560); \ + __ret_560; \ +}) +#else +#define vmul_laneq_s32(__p0_561, __p1_561, __p2_561) __extension__ ({ \ + int32x2_t __ret_561; \ + int32x2_t __s0_561 = __p0_561; \ + int32x4_t __s1_561 = __p1_561; \ + 
int32x2_t __rev0_561; __rev0_561 = __builtin_shufflevector(__s0_561, __s0_561, 1, 0); \ + int32x4_t __rev1_561; __rev1_561 = __builtin_shufflevector(__s1_561, __s1_561, 3, 2, 1, 0); \ + __ret_561 = __rev0_561 * __noswap_splat_laneq_s32(__rev1_561, __p2_561); \ + __ret_561 = __builtin_shufflevector(__ret_561, __ret_561, 1, 0); \ + __ret_561; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s16(__p0_562, __p1_562, __p2_562) __extension__ ({ \ + int16x4_t __ret_562; \ + int16x4_t __s0_562 = __p0_562; \ + int16x8_t __s1_562 = __p1_562; \ + __ret_562 = __s0_562 * splat_laneq_s16(__s1_562, __p2_562); \ + __ret_562; \ +}) +#else +#define vmul_laneq_s16(__p0_563, __p1_563, __p2_563) __extension__ ({ \ + int16x4_t __ret_563; \ + int16x4_t __s0_563 = __p0_563; \ + int16x8_t __s1_563 = __p1_563; \ + int16x4_t __rev0_563; __rev0_563 = __builtin_shufflevector(__s0_563, __s0_563, 3, 2, 1, 0); \ + int16x8_t __rev1_563; __rev1_563 = __builtin_shufflevector(__s1_563, __s1_563, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_563 = __rev0_563 * __noswap_splat_laneq_s16(__rev1_563, __p2_563); \ + __ret_563 = __builtin_shufflevector(__ret_563, __ret_563, 3, 2, 1, 0); \ + __ret_563; \ +}) +#endif + +__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + __ret = __p0 * (float64x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (float64x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); + return __ret; +} +#else +__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, 
uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u32(__p0_564, __p1_564, __p2_564) __extension__ ({ \ + uint64x2_t __ret_564; \ + uint32x4_t __s0_564 = __p0_564; \ + uint32x2_t __s1_564 = __p1_564; \ + __ret_564 = vmull_u32(vget_high_u32(__s0_564), splat_lane_u32(__s1_564, __p2_564)); \ + __ret_564; \ +}) +#else +#define vmull_high_lane_u32(__p0_565, __p1_565, __p2_565) __extension__ ({ \ + uint64x2_t __ret_565; \ + uint32x4_t __s0_565 = __p0_565; \ + uint32x2_t __s1_565 = __p1_565; \ + uint32x4_t __rev0_565; __rev0_565 = 
__builtin_shufflevector(__s0_565, __s0_565, 3, 2, 1, 0); \ + uint32x2_t __rev1_565; __rev1_565 = __builtin_shufflevector(__s1_565, __s1_565, 1, 0); \ + __ret_565 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_565), __noswap_splat_lane_u32(__rev1_565, __p2_565)); \ + __ret_565 = __builtin_shufflevector(__ret_565, __ret_565, 1, 0); \ + __ret_565; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u16(__p0_566, __p1_566, __p2_566) __extension__ ({ \ + uint32x4_t __ret_566; \ + uint16x8_t __s0_566 = __p0_566; \ + uint16x4_t __s1_566 = __p1_566; \ + __ret_566 = vmull_u16(vget_high_u16(__s0_566), splat_lane_u16(__s1_566, __p2_566)); \ + __ret_566; \ +}) +#else +#define vmull_high_lane_u16(__p0_567, __p1_567, __p2_567) __extension__ ({ \ + uint32x4_t __ret_567; \ + uint16x8_t __s0_567 = __p0_567; \ + uint16x4_t __s1_567 = __p1_567; \ + uint16x8_t __rev0_567; __rev0_567 = __builtin_shufflevector(__s0_567, __s0_567, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_567; __rev1_567 = __builtin_shufflevector(__s1_567, __s1_567, 3, 2, 1, 0); \ + __ret_567 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_567), __noswap_splat_lane_u16(__rev1_567, __p2_567)); \ + __ret_567 = __builtin_shufflevector(__ret_567, __ret_567, 3, 2, 1, 0); \ + __ret_567; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s32(__p0_568, __p1_568, __p2_568) __extension__ ({ \ + int64x2_t __ret_568; \ + int32x4_t __s0_568 = __p0_568; \ + int32x2_t __s1_568 = __p1_568; \ + __ret_568 = vmull_s32(vget_high_s32(__s0_568), splat_lane_s32(__s1_568, __p2_568)); \ + __ret_568; \ +}) +#else +#define vmull_high_lane_s32(__p0_569, __p1_569, __p2_569) __extension__ ({ \ + int64x2_t __ret_569; \ + int32x4_t __s0_569 = __p0_569; \ + int32x2_t __s1_569 = __p1_569; \ + int32x4_t __rev0_569; __rev0_569 = __builtin_shufflevector(__s0_569, __s0_569, 3, 2, 1, 0); \ + int32x2_t __rev1_569; __rev1_569 = __builtin_shufflevector(__s1_569, __s1_569, 1, 0); \ + __ret_569 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_569), __noswap_splat_lane_s32(__rev1_569, __p2_569)); \ + __ret_569 = __builtin_shufflevector(__ret_569, __ret_569, 1, 0); \ + __ret_569; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s16(__p0_570, __p1_570, __p2_570) __extension__ ({ \ + int32x4_t __ret_570; \ + int16x8_t __s0_570 = __p0_570; \ + int16x4_t __s1_570 = __p1_570; \ + __ret_570 = vmull_s16(vget_high_s16(__s0_570), splat_lane_s16(__s1_570, __p2_570)); \ + __ret_570; \ +}) +#else +#define vmull_high_lane_s16(__p0_571, __p1_571, __p2_571) __extension__ ({ \ + int32x4_t __ret_571; \ + int16x8_t __s0_571 = __p0_571; \ + int16x4_t __s1_571 = __p1_571; \ + int16x8_t __rev0_571; __rev0_571 = __builtin_shufflevector(__s0_571, __s0_571, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_571; __rev1_571 = __builtin_shufflevector(__s1_571, __s1_571, 3, 2, 1, 0); \ + __ret_571 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_571), __noswap_splat_lane_s16(__rev1_571, __p2_571)); \ + __ret_571 = __builtin_shufflevector(__ret_571, __ret_571, 3, 2, 1, 0); \ + __ret_571; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u32(__p0_572, __p1_572, __p2_572) __extension__ ({ \ + uint64x2_t __ret_572; \ + uint32x4_t __s0_572 = __p0_572; \ + uint32x4_t __s1_572 = __p1_572; \ + __ret_572 = vmull_u32(vget_high_u32(__s0_572), splat_laneq_u32(__s1_572, __p2_572)); \ + __ret_572; \ +}) +#else +#define vmull_high_laneq_u32(__p0_573, __p1_573, __p2_573) __extension__ ({ \ + uint64x2_t __ret_573; \ + uint32x4_t __s0_573 = __p0_573; \ + 
uint32x4_t __s1_573 = __p1_573; \ + uint32x4_t __rev0_573; __rev0_573 = __builtin_shufflevector(__s0_573, __s0_573, 3, 2, 1, 0); \ + uint32x4_t __rev1_573; __rev1_573 = __builtin_shufflevector(__s1_573, __s1_573, 3, 2, 1, 0); \ + __ret_573 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_573), __noswap_splat_laneq_u32(__rev1_573, __p2_573)); \ + __ret_573 = __builtin_shufflevector(__ret_573, __ret_573, 1, 0); \ + __ret_573; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u16(__p0_574, __p1_574, __p2_574) __extension__ ({ \ + uint32x4_t __ret_574; \ + uint16x8_t __s0_574 = __p0_574; \ + uint16x8_t __s1_574 = __p1_574; \ + __ret_574 = vmull_u16(vget_high_u16(__s0_574), splat_laneq_u16(__s1_574, __p2_574)); \ + __ret_574; \ +}) +#else +#define vmull_high_laneq_u16(__p0_575, __p1_575, __p2_575) __extension__ ({ \ + uint32x4_t __ret_575; \ + uint16x8_t __s0_575 = __p0_575; \ + uint16x8_t __s1_575 = __p1_575; \ + uint16x8_t __rev0_575; __rev0_575 = __builtin_shufflevector(__s0_575, __s0_575, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_575; __rev1_575 = __builtin_shufflevector(__s1_575, __s1_575, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_575 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_575), __noswap_splat_laneq_u16(__rev1_575, __p2_575)); \ + __ret_575 = __builtin_shufflevector(__ret_575, __ret_575, 3, 2, 1, 0); \ + __ret_575; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s32(__p0_576, __p1_576, __p2_576) __extension__ ({ \ + int64x2_t __ret_576; \ + int32x4_t __s0_576 = __p0_576; \ + int32x4_t __s1_576 = __p1_576; \ + __ret_576 = vmull_s32(vget_high_s32(__s0_576), splat_laneq_s32(__s1_576, __p2_576)); \ + __ret_576; \ +}) +#else +#define vmull_high_laneq_s32(__p0_577, __p1_577, __p2_577) __extension__ ({ \ + int64x2_t __ret_577; \ + int32x4_t __s0_577 = __p0_577; \ + int32x4_t __s1_577 = __p1_577; \ + int32x4_t __rev0_577; __rev0_577 = __builtin_shufflevector(__s0_577, __s0_577, 3, 2, 1, 0); \ + int32x4_t __rev1_577; __rev1_577 = __builtin_shufflevector(__s1_577, __s1_577, 3, 2, 1, 0); \ + __ret_577 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_577), __noswap_splat_laneq_s32(__rev1_577, __p2_577)); \ + __ret_577 = __builtin_shufflevector(__ret_577, __ret_577, 1, 0); \ + __ret_577; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s16(__p0_578, __p1_578, __p2_578) __extension__ ({ \ + int32x4_t __ret_578; \ + int16x8_t __s0_578 = __p0_578; \ + int16x8_t __s1_578 = __p1_578; \ + __ret_578 = vmull_s16(vget_high_s16(__s0_578), splat_laneq_s16(__s1_578, __p2_578)); \ + __ret_578; \ +}) +#else +#define vmull_high_laneq_s16(__p0_579, __p1_579, __p2_579) __extension__ ({ \ + int32x4_t __ret_579; \ + int16x8_t __s0_579 = __p0_579; \ + int16x8_t __s1_579 = __p1_579; \ + int16x8_t __rev0_579; __rev0_579 = __builtin_shufflevector(__s0_579, __s0_579, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_579; __rev1_579 = __builtin_shufflevector(__s1_579, __s1_579, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_579 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_579), __noswap_splat_laneq_s16(__rev1_579, __p2_579)); \ + __ret_579 = __builtin_shufflevector(__ret_579, __ret_579, 3, 2, 1, 0); \ + __ret_579; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_n_u32(vget_high_u32(__p0), __p1); + return __ret; +} +#else +__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); + __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_n_u16(vget_high_u16(__p0), __p1); + return __ret; +} +#else +__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u32(__p0_580, __p1_580, __p2_580) __extension__ ({ \ + uint64x2_t __ret_580; \ + uint32x2_t __s0_580 = __p0_580; \ + uint32x4_t __s1_580 = __p1_580; \ + __ret_580 = vmull_u32(__s0_580, splat_laneq_u32(__s1_580, __p2_580)); \ + __ret_580; \ +}) +#else +#define vmull_laneq_u32(__p0_581, __p1_581, __p2_581) __extension__ ({ \ + uint64x2_t __ret_581; \ + uint32x2_t __s0_581 = __p0_581; \ + uint32x4_t __s1_581 = __p1_581; \ + uint32x2_t __rev0_581; __rev0_581 = __builtin_shufflevector(__s0_581, __s0_581, 1, 0); \ + uint32x4_t __rev1_581; __rev1_581 = __builtin_shufflevector(__s1_581, __s1_581, 3, 2, 1, 0); \ + __ret_581 = __noswap_vmull_u32(__rev0_581, __noswap_splat_laneq_u32(__rev1_581, __p2_581)); \ + __ret_581 = __builtin_shufflevector(__ret_581, __ret_581, 1, 0); \ + __ret_581; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u16(__p0_582, __p1_582, __p2_582) __extension__ ({ \ + uint32x4_t __ret_582; \ + uint16x4_t __s0_582 = __p0_582; \ + uint16x8_t __s1_582 = __p1_582; \ + __ret_582 = vmull_u16(__s0_582, splat_laneq_u16(__s1_582, __p2_582)); \ + __ret_582; \ +}) +#else +#define vmull_laneq_u16(__p0_583, __p1_583, __p2_583) __extension__ ({ \ + uint32x4_t __ret_583; \ + uint16x4_t __s0_583 = __p0_583; \ + uint16x8_t __s1_583 = __p1_583; \ + uint16x4_t __rev0_583; __rev0_583 = __builtin_shufflevector(__s0_583, __s0_583, 3, 2, 1, 0); \ + uint16x8_t __rev1_583; __rev1_583 = __builtin_shufflevector(__s1_583, __s1_583, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_583 = __noswap_vmull_u16(__rev0_583, __noswap_splat_laneq_u16(__rev1_583, __p2_583)); \ + __ret_583 = __builtin_shufflevector(__ret_583, __ret_583, 3, 2, 1, 0); \ + __ret_583; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_s32(__p0_584, __p1_584, __p2_584) __extension__ ({ \ + 
int64x2_t __ret_584; \ + int32x2_t __s0_584 = __p0_584; \ + int32x4_t __s1_584 = __p1_584; \ + __ret_584 = vmull_s32(__s0_584, splat_laneq_s32(__s1_584, __p2_584)); \ + __ret_584; \ +}) +#else +#define vmull_laneq_s32(__p0_585, __p1_585, __p2_585) __extension__ ({ \ + int64x2_t __ret_585; \ + int32x2_t __s0_585 = __p0_585; \ + int32x4_t __s1_585 = __p1_585; \ + int32x2_t __rev0_585; __rev0_585 = __builtin_shufflevector(__s0_585, __s0_585, 1, 0); \ + int32x4_t __rev1_585; __rev1_585 = __builtin_shufflevector(__s1_585, __s1_585, 3, 2, 1, 0); \ + __ret_585 = __noswap_vmull_s32(__rev0_585, __noswap_splat_laneq_s32(__rev1_585, __p2_585)); \ + __ret_585 = __builtin_shufflevector(__ret_585, __ret_585, 1, 0); \ + __ret_585; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_s16(__p0_586, __p1_586, __p2_586) __extension__ ({ \ + int32x4_t __ret_586; \ + int16x4_t __s0_586 = __p0_586; \ + int16x8_t __s1_586 = __p1_586; \ + __ret_586 = vmull_s16(__s0_586, splat_laneq_s16(__s1_586, __p2_586)); \ + __ret_586; \ +}) +#else +#define vmull_laneq_s16(__p0_587, __p1_587, __p2_587) __extension__ ({ \ + int32x4_t __ret_587; \ + int16x4_t __s0_587 = __p0_587; \ + int16x8_t __s1_587 = __p1_587; \ + int16x4_t __rev0_587; __rev0_587 = __builtin_shufflevector(__s0_587, __s0_587, 3, 2, 1, 0); \ + int16x8_t __rev1_587; __rev1_587 = __builtin_shufflevector(__s1_587, __s1_587, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_587 = __noswap_vmull_s16(__rev0_587, __noswap_splat_laneq_s16(__rev1_587, __p2_587)); \ + __ret_587 = __builtin_shufflevector(__ret_587, __ret_587, 3, 2, 1, 0); \ + __ret_587; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#endif + +__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t 
vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#endif + +__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); + return __ret; +} +__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); + return __ret; +} +#define vmulxd_lane_f64(__p0_588, __p1_588, __p2_588) __extension__ ({ \ + float64_t __ret_588; \ + float64_t __s0_588 = __p0_588; \ + float64x1_t __s1_588 = __p1_588; \ + __ret_588 = vmulxd_f64(__s0_588, vget_lane_f64(__s1_588, __p2_588)); \ + __ret_588; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmulxs_lane_f32(__p0_589, __p1_589, __p2_589) __extension__ ({ \ + float32_t __ret_589; \ + float32_t __s0_589 = __p0_589; \ + float32x2_t __s1_589 = __p1_589; \ + __ret_589 = vmulxs_f32(__s0_589, vget_lane_f32(__s1_589, __p2_589)); \ + __ret_589; \ +}) +#else +#define vmulxs_lane_f32(__p0_590, __p1_590, __p2_590) __extension__ ({ \ + float32_t __ret_590; \ + float32_t __s0_590 = __p0_590; \ + float32x2_t __s1_590 = __p1_590; \ + float32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ + __ret_590 = vmulxs_f32(__s0_590, __noswap_vget_lane_f32(__rev1_590, __p2_590)); \ + __ret_590; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f64(__p0_591, __p1_591, __p2_591) __extension__ ({ \ + float64x2_t __ret_591; \ + float64x2_t __s0_591 = __p0_591; \ + float64x1_t __s1_591 = __p1_591; \ + __ret_591 = vmulxq_f64(__s0_591, splatq_lane_f64(__s1_591, __p2_591)); \ + __ret_591; \ +}) +#else +#define vmulxq_lane_f64(__p0_592, __p1_592, __p2_592) __extension__ ({ \ + float64x2_t __ret_592; \ + float64x2_t __s0_592 = __p0_592; \ + float64x1_t __s1_592 = __p1_592; \ + float64x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \ + __ret_592 = __noswap_vmulxq_f64(__rev0_592, __noswap_splatq_lane_f64(__s1_592, __p2_592)); \ + __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \ + __ret_592; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f32(__p0_593, __p1_593, __p2_593) __extension__ ({ \ + float32x4_t __ret_593; \ + float32x4_t __s0_593 = __p0_593; \ + float32x2_t __s1_593 = __p1_593; \ + __ret_593 = vmulxq_f32(__s0_593, splatq_lane_f32(__s1_593, __p2_593)); \ + __ret_593; \ +}) +#else +#define vmulxq_lane_f32(__p0_594, __p1_594, __p2_594) __extension__ ({ \ + float32x4_t __ret_594; \ + float32x4_t __s0_594 = __p0_594; \ + float32x2_t __s1_594 = __p1_594; \ + float32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ + float32x2_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 1, 0); \ + __ret_594 = __noswap_vmulxq_f32(__rev0_594, __noswap_splatq_lane_f32(__rev1_594, 
__p2_594)); \ + __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \ + __ret_594; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \ + float32x2_t __ret_595; \ + float32x2_t __s0_595 = __p0_595; \ + float32x2_t __s1_595 = __p1_595; \ + __ret_595 = vmulx_f32(__s0_595, splat_lane_f32(__s1_595, __p2_595)); \ + __ret_595; \ +}) +#else +#define vmulx_lane_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \ + float32x2_t __ret_596; \ + float32x2_t __s0_596 = __p0_596; \ + float32x2_t __s1_596 = __p1_596; \ + float32x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \ + float32x2_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \ + __ret_596 = __noswap_vmulx_f32(__rev0_596, __noswap_splat_lane_f32(__rev1_596, __p2_596)); \ + __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \ + __ret_596; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxd_laneq_f64(__p0_597, __p1_597, __p2_597) __extension__ ({ \ + float64_t __ret_597; \ + float64_t __s0_597 = __p0_597; \ + float64x2_t __s1_597 = __p1_597; \ + __ret_597 = vmulxd_f64(__s0_597, vgetq_lane_f64(__s1_597, __p2_597)); \ + __ret_597; \ +}) +#else +#define vmulxd_laneq_f64(__p0_598, __p1_598, __p2_598) __extension__ ({ \ + float64_t __ret_598; \ + float64_t __s0_598 = __p0_598; \ + float64x2_t __s1_598 = __p1_598; \ + float64x2_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 1, 0); \ + __ret_598 = vmulxd_f64(__s0_598, __noswap_vgetq_lane_f64(__rev1_598, __p2_598)); \ + __ret_598; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxs_laneq_f32(__p0_599, __p1_599, __p2_599) __extension__ ({ \ + float32_t __ret_599; \ + float32_t __s0_599 = __p0_599; \ + float32x4_t __s1_599 = __p1_599; \ + __ret_599 = vmulxs_f32(__s0_599, vgetq_lane_f32(__s1_599, __p2_599)); \ + __ret_599; \ +}) +#else +#define vmulxs_laneq_f32(__p0_600, __p1_600, __p2_600) __extension__ ({ \ + float32_t __ret_600; \ + float32_t __s0_600 = __p0_600; \ + float32x4_t __s1_600 = __p1_600; \ + float32x4_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \ + __ret_600 = vmulxs_f32(__s0_600, __noswap_vgetq_lane_f32(__rev1_600, __p2_600)); \ + __ret_600; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f64(__p0_601, __p1_601, __p2_601) __extension__ ({ \ + float64x2_t __ret_601; \ + float64x2_t __s0_601 = __p0_601; \ + float64x2_t __s1_601 = __p1_601; \ + __ret_601 = vmulxq_f64(__s0_601, splatq_laneq_f64(__s1_601, __p2_601)); \ + __ret_601; \ +}) +#else +#define vmulxq_laneq_f64(__p0_602, __p1_602, __p2_602) __extension__ ({ \ + float64x2_t __ret_602; \ + float64x2_t __s0_602 = __p0_602; \ + float64x2_t __s1_602 = __p1_602; \ + float64x2_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \ + float64x2_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 1, 0); \ + __ret_602 = __noswap_vmulxq_f64(__rev0_602, __noswap_splatq_laneq_f64(__rev1_602, __p2_602)); \ + __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \ + __ret_602; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f32(__p0_603, __p1_603, __p2_603) __extension__ ({ \ + float32x4_t __ret_603; \ + float32x4_t __s0_603 = __p0_603; \ + float32x4_t __s1_603 = __p1_603; \ + __ret_603 = vmulxq_f32(__s0_603, splatq_laneq_f32(__s1_603, __p2_603)); \ + __ret_603; \ +}) +#else +#define vmulxq_laneq_f32(__p0_604, __p1_604, __p2_604) 
__extension__ ({ \ + float32x4_t __ret_604; \ + float32x4_t __s0_604 = __p0_604; \ + float32x4_t __s1_604 = __p1_604; \ + float32x4_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 3, 2, 1, 0); \ + float32x4_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \ + __ret_604 = __noswap_vmulxq_f32(__rev0_604, __noswap_splatq_laneq_f32(__rev1_604, __p2_604)); \ + __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 3, 2, 1, 0); \ + __ret_604; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f32(__p0_605, __p1_605, __p2_605) __extension__ ({ \ + float32x2_t __ret_605; \ + float32x2_t __s0_605 = __p0_605; \ + float32x4_t __s1_605 = __p1_605; \ + __ret_605 = vmulx_f32(__s0_605, splat_laneq_f32(__s1_605, __p2_605)); \ + __ret_605; \ +}) +#else +#define vmulx_laneq_f32(__p0_606, __p1_606, __p2_606) __extension__ ({ \ + float32x2_t __ret_606; \ + float32x2_t __s0_606 = __p0_606; \ + float32x4_t __s1_606 = __p1_606; \ + float32x2_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 1, 0); \ + float32x4_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 3, 2, 1, 0); \ + __ret_606 = __noswap_vmulx_f32(__rev0_606, __noswap_splat_laneq_f32(__rev1_606, __p2_606)); \ + __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 1, 0); \ + __ret_606; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vneg_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = -__p0; + return __ret; +} +__ai int64x1_t vneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = -__p0; + return __ret; +} +__ai int64_t vnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) 
__builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vpaddd_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0); + return __ret; +} +#else +__ai uint64_t vpaddd_u64(uint64x2_t __p0) { + uint64_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpaddd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpaddd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vpaddd_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0); + return __ret; +} +#else +__ai int64_t vpaddd_s64(int64x2_t __p0) { + int64_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpadds_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpadds_f32(__p0); + return __ret; +} +#else +__ai float32_t vpadds_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); 
+ __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0); + return __ret; +} +#else +__ai float32_t vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = 
(float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0); + return __ret; +} +#else +__ai float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) 
__builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmins_f32(__p0); + return __ret; +} +#else +__ai float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t 
vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0); + return __ret; +} +#else +__ai float32_t vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int8_t vqabsb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); + return __ret; +} +__ai int32_t vqabss_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); + return __ret; +} +__ai int64_t vqabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); + return __ret; +} +__ai int16_t vqabsh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); + return __ret; +} +__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); + return __ret; +} +__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); + return __ret; +} +__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; 
__rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \ + int64x2_t __ret_607; \ + int64x2_t __s0_607 = __p0_607; \ + int32x4_t __s1_607 = __p1_607; \ + int32x2_t __s2_607 = __p2_607; \ + __ret_607 = vqdmlal_s32(__s0_607, vget_high_s32(__s1_607), splat_lane_s32(__s2_607, __p3_607)); \ + __ret_607; \ +}) +#else +#define vqdmlal_high_lane_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \ + int64x2_t __ret_608; \ + int64x2_t __s0_608 = __p0_608; \ + int32x4_t __s1_608 = __p1_608; \ + int32x2_t __s2_608 = __p2_608; \ + int64x2_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \ + int32x4_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \ + int32x2_t __rev2_608; __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 1, 0); \ + __ret_608 = __noswap_vqdmlal_s32(__rev0_608, __noswap_vget_high_s32(__rev1_608), __noswap_splat_lane_s32(__rev2_608, __p3_608)); \ + __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \ + __ret_608; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \ + int32x4_t __ret_609; \ + int32x4_t __s0_609 = __p0_609; \ + int16x8_t __s1_609 = __p1_609; \ + int16x4_t __s2_609 = __p2_609; \ + __ret_609 = vqdmlal_s16(__s0_609, vget_high_s16(__s1_609), splat_lane_s16(__s2_609, __p3_609)); \ + __ret_609; \ +}) +#else +#define vqdmlal_high_lane_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \ + int32x4_t __ret_610; \ + int32x4_t __s0_610 = __p0_610; \ + int16x8_t __s1_610 = __p1_610; \ + int16x4_t __s2_610 = __p2_610; \ + int32x4_t __rev0_610; __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \ + int16x8_t __rev1_610; __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_610; __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 3, 2, 1, 0); \ + __ret_610 = __noswap_vqdmlal_s16(__rev0_610, __noswap_vget_high_s16(__rev1_610), __noswap_splat_lane_s16(__rev2_610, __p3_610)); \ + __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \ + __ret_610; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \ + int64x2_t __ret_611; \ + int64x2_t __s0_611 = __p0_611; \ + int32x4_t __s1_611 = __p1_611; \ + int32x4_t __s2_611 = __p2_611; \ + __ret_611 = vqdmlal_s32(__s0_611, 
vget_high_s32(__s1_611), splat_laneq_s32(__s2_611, __p3_611)); \ + __ret_611; \ +}) +#else +#define vqdmlal_high_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \ + int64x2_t __ret_612; \ + int64x2_t __s0_612 = __p0_612; \ + int32x4_t __s1_612 = __p1_612; \ + int32x4_t __s2_612 = __p2_612; \ + int64x2_t __rev0_612; __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \ + int32x4_t __rev1_612; __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 3, 2, 1, 0); \ + int32x4_t __rev2_612; __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \ + __ret_612 = __noswap_vqdmlal_s32(__rev0_612, __noswap_vget_high_s32(__rev1_612), __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \ + __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \ + __ret_612; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \ + int32x4_t __ret_613; \ + int32x4_t __s0_613 = __p0_613; \ + int16x8_t __s1_613 = __p1_613; \ + int16x8_t __s2_613 = __p2_613; \ + __ret_613 = vqdmlal_s16(__s0_613, vget_high_s16(__s1_613), splat_laneq_s16(__s2_613, __p3_613)); \ + __ret_613; \ +}) +#else +#define vqdmlal_high_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \ + int32x4_t __ret_614; \ + int32x4_t __s0_614 = __p0_614; \ + int16x8_t __s1_614 = __p1_614; \ + int16x8_t __s2_614 = __p2_614; \ + int32x4_t __rev0_614; __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \ + int16x8_t __rev1_614; __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_614; __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_614 = __noswap_vqdmlal_s16(__rev0_614, __noswap_vget_high_s16(__rev1_614), __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \ + __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \ + __ret_614; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define 
vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \ + int64x2_t __ret_615; \ + int64x2_t __s0_615 = __p0_615; \ + int32x2_t __s1_615 = __p1_615; \ + int32x4_t __s2_615 = __p2_615; \ + __ret_615 = vqdmlal_s32(__s0_615, __s1_615, splat_laneq_s32(__s2_615, __p3_615)); \ + __ret_615; \ +}) +#else +#define vqdmlal_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \ + int64x2_t __ret_616; \ + int64x2_t __s0_616 = __p0_616; \ + int32x2_t __s1_616 = __p1_616; \ + int32x4_t __s2_616 = __p2_616; \ + int64x2_t __rev0_616; __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \ + int32x2_t __rev1_616; __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \ + int32x4_t __rev2_616; __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \ + __ret_616 = __noswap_vqdmlal_s32(__rev0_616, __rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \ + __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \ + __ret_616; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \ + int32x4_t __ret_617; \ + int32x4_t __s0_617 = __p0_617; \ + int16x4_t __s1_617 = 
__p1_617; \ + int16x8_t __s2_617 = __p2_617; \ + __ret_617 = vqdmlal_s16(__s0_617, __s1_617, splat_laneq_s16(__s2_617, __p3_617)); \ + __ret_617; \ +}) +#else +#define vqdmlal_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \ + int32x4_t __ret_618; \ + int32x4_t __s0_618 = __p0_618; \ + int16x4_t __s1_618 = __p1_618; \ + int16x8_t __s2_618 = __p2_618; \ + int32x4_t __rev0_618; __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \ + int16x4_t __rev1_618; __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \ + int16x8_t __rev2_618; __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_618 = __noswap_vqdmlal_s16(__rev0_618, __rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \ + __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \ + __ret_618; \ +}) +#endif + +__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); + return __ret; +} +__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s32(__p0_619, __p1_619, __p2_619, __p3_619) __extension__ ({ \ + int64x2_t __ret_619; \ + int64x2_t __s0_619 = __p0_619; \ + int32x4_t __s1_619 = __p1_619; \ + int32x2_t __s2_619 = __p2_619; \ + __ret_619 = vqdmlsl_s32(__s0_619, vget_high_s32(__s1_619), splat_lane_s32(__s2_619, __p3_619)); \ + __ret_619; \ +}) +#else +#define vqdmlsl_high_lane_s32(__p0_620, __p1_620, __p2_620, __p3_620) __extension__ ({ \ + int64x2_t __ret_620; \ + int64x2_t __s0_620 = __p0_620; \ + int32x4_t __s1_620 = __p1_620; \ + int32x2_t __s2_620 = __p2_620; \ + int64x2_t __rev0_620; __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, 1, 0); \ + int32x4_t __rev1_620; __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, 3, 2, 1, 0); \ + int32x2_t 
__rev2_620; __rev2_620 = __builtin_shufflevector(__s2_620, __s2_620, 1, 0); \ + __ret_620 = __noswap_vqdmlsl_s32(__rev0_620, __noswap_vget_high_s32(__rev1_620), __noswap_splat_lane_s32(__rev2_620, __p3_620)); \ + __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 1, 0); \ + __ret_620; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s16(__p0_621, __p1_621, __p2_621, __p3_621) __extension__ ({ \ + int32x4_t __ret_621; \ + int32x4_t __s0_621 = __p0_621; \ + int16x8_t __s1_621 = __p1_621; \ + int16x4_t __s2_621 = __p2_621; \ + __ret_621 = vqdmlsl_s16(__s0_621, vget_high_s16(__s1_621), splat_lane_s16(__s2_621, __p3_621)); \ + __ret_621; \ +}) +#else +#define vqdmlsl_high_lane_s16(__p0_622, __p1_622, __p2_622, __p3_622) __extension__ ({ \ + int32x4_t __ret_622; \ + int32x4_t __s0_622 = __p0_622; \ + int16x8_t __s1_622 = __p1_622; \ + int16x4_t __s2_622 = __p2_622; \ + int32x4_t __rev0_622; __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, 3, 2, 1, 0); \ + int16x8_t __rev1_622; __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_622; __rev2_622 = __builtin_shufflevector(__s2_622, __s2_622, 3, 2, 1, 0); \ + __ret_622 = __noswap_vqdmlsl_s16(__rev0_622, __noswap_vget_high_s16(__rev1_622), __noswap_splat_lane_s16(__rev2_622, __p3_622)); \ + __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0); \ + __ret_622; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s32(__p0_623, __p1_623, __p2_623, __p3_623) __extension__ ({ \ + int64x2_t __ret_623; \ + int64x2_t __s0_623 = __p0_623; \ + int32x4_t __s1_623 = __p1_623; \ + int32x4_t __s2_623 = __p2_623; \ + __ret_623 = vqdmlsl_s32(__s0_623, vget_high_s32(__s1_623), splat_laneq_s32(__s2_623, __p3_623)); \ + __ret_623; \ +}) +#else +#define vqdmlsl_high_laneq_s32(__p0_624, __p1_624, __p2_624, __p3_624) __extension__ ({ \ + int64x2_t __ret_624; \ + int64x2_t __s0_624 = __p0_624; \ + int32x4_t __s1_624 = __p1_624; \ + int32x4_t __s2_624 = __p2_624; \ + int64x2_t __rev0_624; __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, 1, 0); \ + int32x4_t __rev1_624; __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, 3, 2, 1, 0); \ + int32x4_t __rev2_624; __rev2_624 = __builtin_shufflevector(__s2_624, __s2_624, 3, 2, 1, 0); \ + __ret_624 = __noswap_vqdmlsl_s32(__rev0_624, __noswap_vget_high_s32(__rev1_624), __noswap_splat_laneq_s32(__rev2_624, __p3_624)); \ + __ret_624 = __builtin_shufflevector(__ret_624, __ret_624, 1, 0); \ + __ret_624; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s16(__p0_625, __p1_625, __p2_625, __p3_625) __extension__ ({ \ + int32x4_t __ret_625; \ + int32x4_t __s0_625 = __p0_625; \ + int16x8_t __s1_625 = __p1_625; \ + int16x8_t __s2_625 = __p2_625; \ + __ret_625 = vqdmlsl_s16(__s0_625, vget_high_s16(__s1_625), splat_laneq_s16(__s2_625, __p3_625)); \ + __ret_625; \ +}) +#else +#define vqdmlsl_high_laneq_s16(__p0_626, __p1_626, __p2_626, __p3_626) __extension__ ({ \ + int32x4_t __ret_626; \ + int32x4_t __s0_626 = __p0_626; \ + int16x8_t __s1_626 = __p1_626; \ + int16x8_t __s2_626 = __p2_626; \ + int32x4_t __rev0_626; __rev0_626 = __builtin_shufflevector(__s0_626, __s0_626, 3, 2, 1, 0); \ + int16x8_t __rev1_626; __rev1_626 = __builtin_shufflevector(__s1_626, __s1_626, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_626; __rev2_626 = __builtin_shufflevector(__s2_626, __s2_626, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_626 = __noswap_vqdmlsl_s16(__rev0_626, __noswap_vget_high_s16(__rev1_626), 
__noswap_splat_laneq_s16(__rev2_626, __p3_626)); \ + __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); \ + __ret_626; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t 
__s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s32(__p0_627, __p1_627, __p2_627, __p3_627) __extension__ ({ \ + int64x2_t __ret_627; \ + int64x2_t __s0_627 = __p0_627; \ + int32x2_t __s1_627 = __p1_627; \ + int32x4_t __s2_627 = __p2_627; \ + __ret_627 = vqdmlsl_s32(__s0_627, __s1_627, splat_laneq_s32(__s2_627, __p3_627)); \ + __ret_627; \ +}) +#else +#define vqdmlsl_laneq_s32(__p0_628, __p1_628, __p2_628, __p3_628) __extension__ ({ \ + int64x2_t __ret_628; \ + int64x2_t __s0_628 = __p0_628; \ + int32x2_t __s1_628 = __p1_628; \ + int32x4_t __s2_628 = __p2_628; \ + int64x2_t __rev0_628; __rev0_628 = __builtin_shufflevector(__s0_628, __s0_628, 1, 0); \ + int32x2_t __rev1_628; __rev1_628 = __builtin_shufflevector(__s1_628, __s1_628, 1, 0); \ + int32x4_t __rev2_628; __rev2_628 = __builtin_shufflevector(__s2_628, __s2_628, 3, 2, 1, 0); \ + __ret_628 = __noswap_vqdmlsl_s32(__rev0_628, __rev1_628, __noswap_splat_laneq_s32(__rev2_628, __p3_628)); \ + __ret_628 = __builtin_shufflevector(__ret_628, __ret_628, 1, 0); \ + __ret_628; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s16(__p0_629, __p1_629, __p2_629, __p3_629) __extension__ ({ \ + int32x4_t __ret_629; \ + int32x4_t __s0_629 = __p0_629; \ + int16x4_t __s1_629 = __p1_629; \ + int16x8_t __s2_629 = __p2_629; \ + __ret_629 = vqdmlsl_s16(__s0_629, __s1_629, splat_laneq_s16(__s2_629, __p3_629)); \ + __ret_629; \ +}) +#else +#define vqdmlsl_laneq_s16(__p0_630, __p1_630, __p2_630, __p3_630) __extension__ ({ \ + int32x4_t __ret_630; \ + int32x4_t __s0_630 = __p0_630; \ + int16x4_t __s1_630 = __p1_630; \ + int16x8_t __s2_630 = __p2_630; \ + int32x4_t __rev0_630; __rev0_630 = __builtin_shufflevector(__s0_630, __s0_630, 3, 2, 1, 0); \ + int16x4_t __rev1_630; __rev1_630 = __builtin_shufflevector(__s1_630, __s1_630, 3, 2, 1, 0); \ + int16x8_t __rev2_630; __rev2_630 = __builtin_shufflevector(__s2_630, __s2_630, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_630 = __noswap_vqdmlsl_s16(__rev0_630, __rev1_630, __noswap_splat_laneq_s16(__rev2_630, __p3_630)); \ + __ret_630 = __builtin_shufflevector(__ret_630, __ret_630, 3, 2, 1, 0); \ + __ret_630; \ +}) +#endif + +__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); + return __ret; +} +__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x4_t) 
__builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_lane_s32(__p0_631, __p1_631, __p2_631) __extension__ ({ \ + int32_t __ret_631; \ + int32_t __s0_631 = __p0_631; \ + int32x2_t __s1_631 = __p1_631; \ + __ret_631 = vqdmulhs_s32(__s0_631, vget_lane_s32(__s1_631, __p2_631)); \ + __ret_631; \ +}) +#else +#define vqdmulhs_lane_s32(__p0_632, __p1_632, __p2_632) __extension__ ({ \ + int32_t __ret_632; \ + int32_t __s0_632 = __p0_632; \ + int32x2_t __s1_632 = __p1_632; \ + int32x2_t __rev1_632; __rev1_632 = __builtin_shufflevector(__s1_632, __s1_632, 1, 0); \ + __ret_632 = vqdmulhs_s32(__s0_632, __noswap_vget_lane_s32(__rev1_632, __p2_632)); \ + __ret_632; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_lane_s16(__p0_633, __p1_633, __p2_633) __extension__ ({ \ + int16_t __ret_633; \ + int16_t __s0_633 = __p0_633; \ + int16x4_t __s1_633 = __p1_633; \ + __ret_633 = vqdmulhh_s16(__s0_633, vget_lane_s16(__s1_633, __p2_633)); \ + __ret_633; \ +}) +#else +#define vqdmulhh_lane_s16(__p0_634, __p1_634, __p2_634) __extension__ ({ \ + int16_t __ret_634; \ + int16_t 
__s0_634 = __p0_634; \ + int16x4_t __s1_634 = __p1_634; \ + int16x4_t __rev1_634; __rev1_634 = __builtin_shufflevector(__s1_634, __s1_634, 3, 2, 1, 0); \ + __ret_634 = vqdmulhh_s16(__s0_634, __noswap_vget_lane_s16(__rev1_634, __p2_634)); \ + __ret_634; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_laneq_s32(__p0_635, __p1_635, __p2_635) __extension__ ({ \ + int32_t __ret_635; \ + int32_t __s0_635 = __p0_635; \ + int32x4_t __s1_635 = __p1_635; \ + __ret_635 = vqdmulhs_s32(__s0_635, vgetq_lane_s32(__s1_635, __p2_635)); \ + __ret_635; \ +}) +#else +#define vqdmulhs_laneq_s32(__p0_636, __p1_636, __p2_636) __extension__ ({ \ + int32_t __ret_636; \ + int32_t __s0_636 = __p0_636; \ + int32x4_t __s1_636 = __p1_636; \ + int32x4_t __rev1_636; __rev1_636 = __builtin_shufflevector(__s1_636, __s1_636, 3, 2, 1, 0); \ + __ret_636 = vqdmulhs_s32(__s0_636, __noswap_vgetq_lane_s32(__rev1_636, __p2_636)); \ + __ret_636; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_laneq_s16(__p0_637, __p1_637, __p2_637) __extension__ ({ \ + int16_t __ret_637; \ + int16_t __s0_637 = __p0_637; \ + int16x8_t __s1_637 = __p1_637; \ + __ret_637 = vqdmulhh_s16(__s0_637, vgetq_lane_s16(__s1_637, __p2_637)); \ + __ret_637; \ +}) +#else +#define vqdmulhh_laneq_s16(__p0_638, __p1_638, __p2_638) __extension__ ({ \ + int16_t __ret_638; \ + int16_t __s0_638 = __p0_638; \ + int16x8_t __s1_638 = __p1_638; \ + int16x8_t __rev1_638; __rev1_638 = __builtin_shufflevector(__s1_638, __s1_638, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_638 = vqdmulhh_s16(__s0_638, __noswap_vgetq_lane_s16(__rev1_638, __p2_638)); \ + __ret_638; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s32(__p0, 
__p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); + return __ret; +} +__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \ + int64x2_t __ret_639; \ + int32x4_t __s0_639 = __p0_639; \ + int32x2_t __s1_639 = __p1_639; \ + __ret_639 = vqdmull_s32(vget_high_s32(__s0_639), splat_lane_s32(__s1_639, __p2_639)); \ + __ret_639; \ +}) +#else +#define vqdmull_high_lane_s32(__p0_640, __p1_640, __p2_640) __extension__ ({ \ + int64x2_t __ret_640; \ + int32x4_t __s0_640 = __p0_640; \ + int32x2_t __s1_640 = __p1_640; \ + int32x4_t __rev0_640; __rev0_640 = __builtin_shufflevector(__s0_640, __s0_640, 3, 2, 1, 0); \ + int32x2_t __rev1_640; __rev1_640 = __builtin_shufflevector(__s1_640, __s1_640, 1, 0); \ + __ret_640 = 
__noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_640), __noswap_splat_lane_s32(__rev1_640, __p2_640)); \ + __ret_640 = __builtin_shufflevector(__ret_640, __ret_640, 1, 0); \ + __ret_640; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \ + int32x4_t __ret_641; \ + int16x8_t __s0_641 = __p0_641; \ + int16x4_t __s1_641 = __p1_641; \ + __ret_641 = vqdmull_s16(vget_high_s16(__s0_641), splat_lane_s16(__s1_641, __p2_641)); \ + __ret_641; \ +}) +#else +#define vqdmull_high_lane_s16(__p0_642, __p1_642, __p2_642) __extension__ ({ \ + int32x4_t __ret_642; \ + int16x8_t __s0_642 = __p0_642; \ + int16x4_t __s1_642 = __p1_642; \ + int16x8_t __rev0_642; __rev0_642 = __builtin_shufflevector(__s0_642, __s0_642, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_642; __rev1_642 = __builtin_shufflevector(__s1_642, __s1_642, 3, 2, 1, 0); \ + __ret_642 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_642), __noswap_splat_lane_s16(__rev1_642, __p2_642)); \ + __ret_642 = __builtin_shufflevector(__ret_642, __ret_642, 3, 2, 1, 0); \ + __ret_642; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s32(__p0_643, __p1_643, __p2_643) __extension__ ({ \ + int64x2_t __ret_643; \ + int32x4_t __s0_643 = __p0_643; \ + int32x4_t __s1_643 = __p1_643; \ + __ret_643 = vqdmull_s32(vget_high_s32(__s0_643), splat_laneq_s32(__s1_643, __p2_643)); \ + __ret_643; \ +}) +#else +#define vqdmull_high_laneq_s32(__p0_644, __p1_644, __p2_644) __extension__ ({ \ + int64x2_t __ret_644; \ + int32x4_t __s0_644 = __p0_644; \ + int32x4_t __s1_644 = __p1_644; \ + int32x4_t __rev0_644; __rev0_644 = __builtin_shufflevector(__s0_644, __s0_644, 3, 2, 1, 0); \ + int32x4_t __rev1_644; __rev1_644 = __builtin_shufflevector(__s1_644, __s1_644, 3, 2, 1, 0); \ + __ret_644 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_644), __noswap_splat_laneq_s32(__rev1_644, __p2_644)); \ + __ret_644 = __builtin_shufflevector(__ret_644, __ret_644, 1, 0); \ + __ret_644; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s16(__p0_645, __p1_645, __p2_645) __extension__ ({ \ + int32x4_t __ret_645; \ + int16x8_t __s0_645 = __p0_645; \ + int16x8_t __s1_645 = __p1_645; \ + __ret_645 = vqdmull_s16(vget_high_s16(__s0_645), splat_laneq_s16(__s1_645, __p2_645)); \ + __ret_645; \ +}) +#else +#define vqdmull_high_laneq_s16(__p0_646, __p1_646, __p2_646) __extension__ ({ \ + int32x4_t __ret_646; \ + int16x8_t __s0_646 = __p0_646; \ + int16x8_t __s1_646 = __p1_646; \ + int16x8_t __rev0_646; __rev0_646 = __builtin_shufflevector(__s0_646, __s0_646, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_646; __rev1_646 = __builtin_shufflevector(__s1_646, __s1_646, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_646 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_646), __noswap_splat_laneq_s16(__rev1_646, __p2_646)); \ + __ret_646 = __builtin_shufflevector(__ret_646, __ret_646, 3, 2, 1, 0); \ + __ret_646; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_high_n_s16(int16x8_t 
__p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_lane_s32(__p0_647, __p1_647, __p2_647) __extension__ ({ \ + int64_t __ret_647; \ + int32_t __s0_647 = __p0_647; \ + int32x2_t __s1_647 = __p1_647; \ + __ret_647 = vqdmulls_s32(__s0_647, vget_lane_s32(__s1_647, __p2_647)); \ + __ret_647; \ +}) +#else +#define vqdmulls_lane_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \ + int64_t __ret_648; \ + int32_t __s0_648 = __p0_648; \ + int32x2_t __s1_648 = __p1_648; \ + int32x2_t __rev1_648; __rev1_648 = __builtin_shufflevector(__s1_648, __s1_648, 1, 0); \ + __ret_648 = vqdmulls_s32(__s0_648, __noswap_vget_lane_s32(__rev1_648, __p2_648)); \ + __ret_648; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_lane_s16(__p0_649, __p1_649, __p2_649) __extension__ ({ \ + int32_t __ret_649; \ + int16_t __s0_649 = __p0_649; \ + int16x4_t __s1_649 = __p1_649; \ + __ret_649 = vqdmullh_s16(__s0_649, vget_lane_s16(__s1_649, __p2_649)); \ + __ret_649; \ +}) +#else +#define vqdmullh_lane_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \ + int32_t __ret_650; \ + int16_t __s0_650 = __p0_650; \ + int16x4_t __s1_650 = __p1_650; \ + int16x4_t __rev1_650; __rev1_650 = __builtin_shufflevector(__s1_650, __s1_650, 3, 2, 1, 0); \ + __ret_650 = vqdmullh_s16(__s0_650, __noswap_vget_lane_s16(__rev1_650, __p2_650)); \ + __ret_650; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \ + int64_t __ret_651; \ + int32_t __s0_651 = __p0_651; \ + int32x4_t __s1_651 = __p1_651; \ + __ret_651 = vqdmulls_s32(__s0_651, vgetq_lane_s32(__s1_651, __p2_651)); \ + __ret_651; \ +}) +#else +#define vqdmulls_laneq_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ + int64_t __ret_652; \ + int32_t __s0_652 = __p0_652; \ + int32x4_t __s1_652 = __p1_652; \ + int32x4_t __rev1_652; __rev1_652 = __builtin_shufflevector(__s1_652, __s1_652, 3, 2, 1, 0); \ + __ret_652 = vqdmulls_s32(__s0_652, __noswap_vgetq_lane_s32(__rev1_652, __p2_652)); \ + __ret_652; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \ + int32_t __ret_653; \ + int16_t __s0_653 = __p0_653; \ + int16x8_t __s1_653 = __p1_653; \ + __ret_653 = vqdmullh_s16(__s0_653, vgetq_lane_s16(__s1_653, __p2_653)); \ + __ret_653; \ +}) +#else +#define vqdmullh_laneq_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ + int32_t __ret_654; \ + int16_t __s0_654 = __p0_654; \ + int16x8_t __s1_654 = __p1_654; \ + int16x8_t __rev1_654; __rev1_654 = __builtin_shufflevector(__s1_654, __s1_654, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_654 = vqdmullh_s16(__s0_654, __noswap_vgetq_lane_s16(__rev1_654, __p2_654)); \ + __ret_654; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \ + int64x2_t __ret_655; \ + int32x2_t __s0_655 = __p0_655; \ + int32x4_t __s1_655 = __p1_655; \ + __ret_655 = vqdmull_s32(__s0_655, splat_laneq_s32(__s1_655, __p2_655)); \ + __ret_655; \ +}) +#else +#define vqdmull_laneq_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ + int64x2_t 
__ret_656; \ + int32x2_t __s0_656 = __p0_656; \ + int32x4_t __s1_656 = __p1_656; \ + int32x2_t __rev0_656; __rev0_656 = __builtin_shufflevector(__s0_656, __s0_656, 1, 0); \ + int32x4_t __rev1_656; __rev1_656 = __builtin_shufflevector(__s1_656, __s1_656, 3, 2, 1, 0); \ + __ret_656 = __noswap_vqdmull_s32(__rev0_656, __noswap_splat_laneq_s32(__rev1_656, __p2_656)); \ + __ret_656 = __builtin_shufflevector(__ret_656, __ret_656, 1, 0); \ + __ret_656; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \ + int32x4_t __ret_657; \ + int16x4_t __s0_657 = __p0_657; \ + int16x8_t __s1_657 = __p1_657; \ + __ret_657 = vqdmull_s16(__s0_657, splat_laneq_s16(__s1_657, __p2_657)); \ + __ret_657; \ +}) +#else +#define vqdmull_laneq_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ + int32x4_t __ret_658; \ + int16x4_t __s0_658 = __p0_658; \ + int16x8_t __s1_658 = __p1_658; \ + int16x4_t __rev0_658; __rev0_658 = __builtin_shufflevector(__s0_658, __s0_658, 3, 2, 1, 0); \ + int16x8_t __rev1_658; __rev1_658 = __builtin_shufflevector(__s1_658, __s1_658, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_658 = __noswap_vqdmull_s16(__rev0_658, __noswap_splat_laneq_s16(__rev1_658, __p2_658)); \ + __ret_658 = __builtin_shufflevector(__ret_658, __ret_658, 3, 2, 1, 0); \ + __ret_658; \ +}) +#endif + +__ai int16_t vqmovns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); + return __ret; +} +__ai int32_t vqmovnd_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); + return __ret; +} +__ai int8_t vqmovnh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); + return __ret; +} +__ai uint16_t vqmovns_u32(uint32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); + return __ret; +} +__ai uint32_t vqmovnd_u64(uint64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); + return __ret; +} +__ai uint8_t vqmovnh_u16(uint16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); + return __ret; +} +#else +__ai uint8x16_t 
vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); + return __ret; +} +#else +__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); + return __ret; +} +#else +__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); + return __ret; +} +#else +__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint16_t vqmovuns_s32(int32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0); + return __ret; +} +__ai uint32_t vqmovund_s64(int64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0); + return __ret; +} +__ai uint8_t vqmovunh_s16(int16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); + return __ret; +} +#else +__ai uint32x4_t 
vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int8_t vqnegb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); + return __ret; +} +__ai int32_t vqnegs_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); + return __ret; +} +__ai int64_t vqnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); + return __ret; +} +__ai int16_t vqnegh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); + return __ret; +} +__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); + return __ret; +} +__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 
1); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_lane_s32(__p0_659, __p1_659, __p2_659) __extension__ ({ \ + int32_t __ret_659; \ + int32_t __s0_659 = __p0_659; \ + int32x2_t __s1_659 = __p1_659; \ + __ret_659 = vqrdmulhs_s32(__s0_659, vget_lane_s32(__s1_659, __p2_659)); \ + __ret_659; \ +}) +#else +#define vqrdmulhs_lane_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ + int32_t __ret_660; \ + int32_t __s0_660 = __p0_660; \ + int32x2_t __s1_660 = __p1_660; \ + int32x2_t __rev1_660; __rev1_660 = __builtin_shufflevector(__s1_660, __s1_660, 1, 0); \ + __ret_660 = vqrdmulhs_s32(__s0_660, __noswap_vget_lane_s32(__rev1_660, __p2_660)); \ + __ret_660; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_lane_s16(__p0_661, __p1_661, __p2_661) __extension__ ({ \ + int16_t __ret_661; \ + int16_t __s0_661 = __p0_661; \ + int16x4_t __s1_661 = __p1_661; \ + __ret_661 = vqrdmulhh_s16(__s0_661, vget_lane_s16(__s1_661, __p2_661)); \ + __ret_661; \ +}) +#else +#define vqrdmulhh_lane_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ + int16_t __ret_662; \ + int16_t __s0_662 = __p0_662; \ + int16x4_t __s1_662 = __p1_662; \ + int16x4_t __rev1_662; __rev1_662 = __builtin_shufflevector(__s1_662, __s1_662, 3, 2, 1, 0); \ + __ret_662 = vqrdmulhh_s16(__s0_662, __noswap_vget_lane_s16(__rev1_662, __p2_662)); \ + __ret_662; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_laneq_s32(__p0_663, __p1_663, __p2_663) __extension__ ({ \ + int32_t __ret_663; \ + int32_t 
__s0_663 = __p0_663; \ + int32x4_t __s1_663 = __p1_663; \ + __ret_663 = vqrdmulhs_s32(__s0_663, vgetq_lane_s32(__s1_663, __p2_663)); \ + __ret_663; \ +}) +#else +#define vqrdmulhs_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ + int32_t __ret_664; \ + int32_t __s0_664 = __p0_664; \ + int32x4_t __s1_664 = __p1_664; \ + int32x4_t __rev1_664; __rev1_664 = __builtin_shufflevector(__s1_664, __s1_664, 3, 2, 1, 0); \ + __ret_664 = vqrdmulhs_s32(__s0_664, __noswap_vgetq_lane_s32(__rev1_664, __p2_664)); \ + __ret_664; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_laneq_s16(__p0_665, __p1_665, __p2_665) __extension__ ({ \ + int16_t __ret_665; \ + int16_t __s0_665 = __p0_665; \ + int16x8_t __s1_665 = __p1_665; \ + __ret_665 = vqrdmulhh_s16(__s0_665, vgetq_lane_s16(__s1_665, __p2_665)); \ + __ret_665; \ +}) +#else +#define vqrdmulhh_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ + int16_t __ret_666; \ + int16_t __s0_666 = __p0_666; \ + int16x8_t __s1_666 = __p1_666; \ + int16x8_t __rev1_666; __rev1_666 = __builtin_shufflevector(__s1_666, __s1_666, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_666 = vqrdmulhh_s16(__s0_666, __noswap_vgetq_lane_s16(__rev1_666, __p2_666)); \ + __ret_666; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u32(__p0_667, __p1_667, __p2_667) __extension__ ({ \ + uint16x8_t __ret_667; \ + uint16x4_t __s0_667 = __p0_667; \ + uint32x4_t __s1_667 = __p1_667; \ + __ret_667 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_667), (uint16x4_t)(vqrshrn_n_u32(__s1_667, __p2_667)))); \ + __ret_667; \ +}) +#else +#define vqrshrn_high_n_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ + uint16x8_t __ret_668; \ + uint16x4_t __s0_668 = __p0_668; \ + uint32x4_t __s1_668 = __p1_668; \ + uint16x4_t __rev0_668; __rev0_668 = __builtin_shufflevector(__s0_668, __s0_668, 3, 2, 1, 0); \ + uint32x4_t __rev1_668; __rev1_668 = __builtin_shufflevector(__s1_668, __s1_668, 3, 2, 1, 0); \ + __ret_668 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_668), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_668, __p2_668)))); \ + __ret_668 = __builtin_shufflevector(__ret_668, __ret_668, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_668; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u64(__p0_669, __p1_669, __p2_669) __extension__ ({ \ + uint32x4_t __ret_669; \ + uint32x2_t __s0_669 = __p0_669; \ + uint64x2_t __s1_669 = __p1_669; \ + __ret_669 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_669), (uint32x2_t)(vqrshrn_n_u64(__s1_669, __p2_669)))); \ + __ret_669; \ +}) +#else +#define vqrshrn_high_n_u64(__p0_670, __p1_670, __p2_670) __extension__ ({ \ + uint32x4_t __ret_670; \ + uint32x2_t __s0_670 = __p0_670; \ + uint64x2_t __s1_670 = 
__p1_670; \ + uint32x2_t __rev0_670; __rev0_670 = __builtin_shufflevector(__s0_670, __s0_670, 1, 0); \ + uint64x2_t __rev1_670; __rev1_670 = __builtin_shufflevector(__s1_670, __s1_670, 1, 0); \ + __ret_670 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_670), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_670, __p2_670)))); \ + __ret_670 = __builtin_shufflevector(__ret_670, __ret_670, 3, 2, 1, 0); \ + __ret_670; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ + uint8x16_t __ret_671; \ + uint8x8_t __s0_671 = __p0_671; \ + uint16x8_t __s1_671 = __p1_671; \ + __ret_671 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_671), (uint8x8_t)(vqrshrn_n_u16(__s1_671, __p2_671)))); \ + __ret_671; \ +}) +#else +#define vqrshrn_high_n_u16(__p0_672, __p1_672, __p2_672) __extension__ ({ \ + uint8x16_t __ret_672; \ + uint8x8_t __s0_672 = __p0_672; \ + uint16x8_t __s1_672 = __p1_672; \ + uint8x8_t __rev0_672; __rev0_672 = __builtin_shufflevector(__s0_672, __s0_672, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_672; __rev1_672 = __builtin_shufflevector(__s1_672, __s1_672, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_672 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_672), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_672, __p2_672)))); \ + __ret_672 = __builtin_shufflevector(__ret_672, __ret_672, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_672; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ + int16x8_t __ret_673; \ + int16x4_t __s0_673 = __p0_673; \ + int32x4_t __s1_673 = __p1_673; \ + __ret_673 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_673), (int16x4_t)(vqrshrn_n_s32(__s1_673, __p2_673)))); \ + __ret_673; \ +}) +#else +#define vqrshrn_high_n_s32(__p0_674, __p1_674, __p2_674) __extension__ ({ \ + int16x8_t __ret_674; \ + int16x4_t __s0_674 = __p0_674; \ + int32x4_t __s1_674 = __p1_674; \ + int16x4_t __rev0_674; __rev0_674 = __builtin_shufflevector(__s0_674, __s0_674, 3, 2, 1, 0); \ + int32x4_t __rev1_674; __rev1_674 = __builtin_shufflevector(__s1_674, __s1_674, 3, 2, 1, 0); \ + __ret_674 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_674), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_674, __p2_674)))); \ + __ret_674 = __builtin_shufflevector(__ret_674, __ret_674, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_674; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s64(__p0_675, __p1_675, __p2_675) __extension__ ({ \ + int32x4_t __ret_675; \ + int32x2_t __s0_675 = __p0_675; \ + int64x2_t __s1_675 = __p1_675; \ + __ret_675 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_675), (int32x2_t)(vqrshrn_n_s64(__s1_675, __p2_675)))); \ + __ret_675; \ +}) +#else +#define vqrshrn_high_n_s64(__p0_676, __p1_676, __p2_676) __extension__ ({ \ + int32x4_t __ret_676; \ + int32x2_t __s0_676 = __p0_676; \ + int64x2_t __s1_676 = __p1_676; \ + int32x2_t __rev0_676; __rev0_676 = __builtin_shufflevector(__s0_676, __s0_676, 1, 0); \ + int64x2_t __rev1_676; __rev1_676 = __builtin_shufflevector(__s1_676, __s1_676, 1, 0); \ + __ret_676 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_676), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_676, __p2_676)))); \ + __ret_676 = __builtin_shufflevector(__ret_676, __ret_676, 3, 2, 1, 0); \ + __ret_676; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s16(__p0_677, __p1_677, __p2_677) __extension__ ({ \ + int8x16_t __ret_677; \ + int8x8_t __s0_677 = __p0_677; \ + int16x8_t __s1_677 = __p1_677; \ + __ret_677 = 
(int8x16_t)(vcombine_s8((int8x8_t)(__s0_677), (int8x8_t)(vqrshrn_n_s16(__s1_677, __p2_677)))); \ + __ret_677; \ +}) +#else +#define vqrshrn_high_n_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ + int8x16_t __ret_678; \ + int8x8_t __s0_678 = __p0_678; \ + int16x8_t __s1_678 = __p1_678; \ + int8x8_t __rev0_678; __rev0_678 = __builtin_shufflevector(__s0_678, __s0_678, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_678; __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_678 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_678), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_678, __p2_678)))); \ + __ret_678 = __builtin_shufflevector(__ret_678, __ret_678, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_678; \ +}) +#endif + +#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s32(__p0_679, __p1_679, __p2_679) __extension__ ({ \ + int16x8_t __ret_679; \ + int16x4_t __s0_679 = __p0_679; \ + int32x4_t __s1_679 = __p1_679; \ + __ret_679 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_679), (int16x4_t)(vqrshrun_n_s32(__s1_679, __p2_679)))); \ + __ret_679; \ +}) +#else +#define vqrshrun_high_n_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ + int16x8_t __ret_680; \ + int16x4_t __s0_680 = __p0_680; \ + int32x4_t __s1_680 = __p1_680; \ + int16x4_t __rev0_680; __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 3, 2, 1, 0); \ + int32x4_t __rev1_680; __rev1_680 = __builtin_shufflevector(__s1_680, __s1_680, 3, 2, 1, 0); \ + __ret_680 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_680), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_680, __p2_680)))); \ + __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_680; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s64(__p0_681, __p1_681, __p2_681) __extension__ ({ \ + int32x4_t __ret_681; \ + int32x2_t __s0_681 = __p0_681; \ + int64x2_t __s1_681 = __p1_681; \ + __ret_681 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_681), (int32x2_t)(vqrshrun_n_s64(__s1_681, __p2_681)))); \ + __ret_681; \ +}) +#else +#define vqrshrun_high_n_s64(__p0_682, __p1_682, __p2_682) __extension__ ({ \ + int32x4_t __ret_682; \ + int32x2_t __s0_682 = __p0_682; \ + int64x2_t __s1_682 = __p1_682; \ + int32x2_t __rev0_682; __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 1, 0); \ + int64x2_t __rev1_682; __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \ + __ret_682 = 
(int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_682), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_682, __p2_682)))); \ + __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \ + __ret_682; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ + int8x16_t __ret_683; \ + int8x8_t __s0_683 = __p0_683; \ + int16x8_t __s1_683 = __p1_683; \ + __ret_683 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_683), (int8x8_t)(vqrshrun_n_s16(__s1_683, __p2_683)))); \ + __ret_683; \ +}) +#else +#define vqrshrun_high_n_s16(__p0_684, __p1_684, __p2_684) __extension__ ({ \ + int8x16_t __ret_684; \ + int8x8_t __s0_684 = __p0_684; \ + int16x8_t __s1_684 = __p1_684; \ + int8x8_t __rev0_684; __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_684; __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_684 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_684), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_684, __p2_684)))); \ + __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_684; \ +}) +#endif + +#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); + return __ret; +} +#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_u16(__p0, __p1) 
__extension__ ({ \ + uint16_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ + __ret; \ +}) +#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ + uint16x8_t __ret_685; \ + uint16x4_t __s0_685 = __p0_685; \ + uint32x4_t __s1_685 = __p1_685; \ + __ret_685 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_685), (uint16x4_t)(vqshrn_n_u32(__s1_685, __p2_685)))); \ + __ret_685; \ +}) +#else +#define vqshrn_high_n_u32(__p0_686, __p1_686, __p2_686) __extension__ ({ \ + uint16x8_t __ret_686; \ + uint16x4_t __s0_686 = __p0_686; \ + uint32x4_t __s1_686 = __p1_686; \ + uint16x4_t __rev0_686; __rev0_686 = __builtin_shufflevector(__s0_686, __s0_686, 3, 2, 1, 0); \ + uint32x4_t __rev1_686; __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 3, 2, 1, 0); \ + __ret_686 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_686), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_686, __p2_686)))); \ + __ret_686 = __builtin_shufflevector(__ret_686, __ret_686, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_686; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u64(__p0_687, __p1_687, __p2_687) __extension__ ({ \ + uint32x4_t __ret_687; \ + uint32x2_t __s0_687 = __p0_687; \ + uint64x2_t __s1_687 = __p1_687; \ + __ret_687 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_687), (uint32x2_t)(vqshrn_n_u64(__s1_687, __p2_687)))); \ + __ret_687; \ +}) +#else +#define vqshrn_high_n_u64(__p0_688, __p1_688, __p2_688) __extension__ ({ \ + uint32x4_t __ret_688; \ + uint32x2_t __s0_688 = __p0_688; \ + uint64x2_t __s1_688 = __p1_688; \ + uint32x2_t __rev0_688; __rev0_688 = __builtin_shufflevector(__s0_688, __s0_688, 1, 0); \ + uint64x2_t __rev1_688; __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 1, 0); \ + __ret_688 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_688), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_688, __p2_688)))); \ + __ret_688 = __builtin_shufflevector(__ret_688, __ret_688, 3, 2, 1, 0); \ + __ret_688; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u16(__p0_689, __p1_689, __p2_689) __extension__ ({ \ + 
uint8x16_t __ret_689; \ + uint8x8_t __s0_689 = __p0_689; \ + uint16x8_t __s1_689 = __p1_689; \ + __ret_689 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_689), (uint8x8_t)(vqshrn_n_u16(__s1_689, __p2_689)))); \ + __ret_689; \ +}) +#else +#define vqshrn_high_n_u16(__p0_690, __p1_690, __p2_690) __extension__ ({ \ + uint8x16_t __ret_690; \ + uint8x8_t __s0_690 = __p0_690; \ + uint16x8_t __s1_690 = __p1_690; \ + uint8x8_t __rev0_690; __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_690; __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_690 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_690), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_690, __p2_690)))); \ + __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_690; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s32(__p0_691, __p1_691, __p2_691) __extension__ ({ \ + int16x8_t __ret_691; \ + int16x4_t __s0_691 = __p0_691; \ + int32x4_t __s1_691 = __p1_691; \ + __ret_691 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_691), (int16x4_t)(vqshrn_n_s32(__s1_691, __p2_691)))); \ + __ret_691; \ +}) +#else +#define vqshrn_high_n_s32(__p0_692, __p1_692, __p2_692) __extension__ ({ \ + int16x8_t __ret_692; \ + int16x4_t __s0_692 = __p0_692; \ + int32x4_t __s1_692 = __p1_692; \ + int16x4_t __rev0_692; __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \ + int32x4_t __rev1_692; __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \ + __ret_692 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_692), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_692, __p2_692)))); \ + __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_692; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s64(__p0_693, __p1_693, __p2_693) __extension__ ({ \ + int32x4_t __ret_693; \ + int32x2_t __s0_693 = __p0_693; \ + int64x2_t __s1_693 = __p1_693; \ + __ret_693 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_693), (int32x2_t)(vqshrn_n_s64(__s1_693, __p2_693)))); \ + __ret_693; \ +}) +#else +#define vqshrn_high_n_s64(__p0_694, __p1_694, __p2_694) __extension__ ({ \ + int32x4_t __ret_694; \ + int32x2_t __s0_694 = __p0_694; \ + int64x2_t __s1_694 = __p1_694; \ + int32x2_t __rev0_694; __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \ + int64x2_t __rev1_694; __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \ + __ret_694 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_694), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_694, __p2_694)))); \ + __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 3, 2, 1, 0); \ + __ret_694; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s16(__p0_695, __p1_695, __p2_695) __extension__ ({ \ + int8x16_t __ret_695; \ + int8x8_t __s0_695 = __p0_695; \ + int16x8_t __s1_695 = __p1_695; \ + __ret_695 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_695), (int8x8_t)(vqshrn_n_s16(__s1_695, __p2_695)))); \ + __ret_695; \ +}) +#else +#define vqshrn_high_n_s16(__p0_696, __p1_696, __p2_696) __extension__ ({ \ + int8x16_t __ret_696; \ + int8x8_t __s0_696 = __p0_696; \ + int16x8_t __s1_696 = __p1_696; \ + int8x8_t __rev0_696; __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_696; __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_696 = 
(int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_696), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_696, __p2_696)))); \ + __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_696; \ +}) +#endif + +#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s32(__p0_697, __p1_697, __p2_697) __extension__ ({ \ + int16x8_t __ret_697; \ + int16x4_t __s0_697 = __p0_697; \ + int32x4_t __s1_697 = __p1_697; \ + __ret_697 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_697), (int16x4_t)(vqshrun_n_s32(__s1_697, __p2_697)))); \ + __ret_697; \ +}) +#else +#define vqshrun_high_n_s32(__p0_698, __p1_698, __p2_698) __extension__ ({ \ + int16x8_t __ret_698; \ + int16x4_t __s0_698 = __p0_698; \ + int32x4_t __s1_698 = __p1_698; \ + int16x4_t __rev0_698; __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \ + int32x4_t __rev1_698; __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 3, 2, 1, 0); \ + __ret_698 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_698), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_698, __p2_698)))); \ + __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_698; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s64(__p0_699, __p1_699, __p2_699) __extension__ ({ \ + int32x4_t __ret_699; \ + int32x2_t __s0_699 = __p0_699; \ + int64x2_t __s1_699 = __p1_699; \ + __ret_699 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_699), (int32x2_t)(vqshrun_n_s64(__s1_699, __p2_699)))); \ + __ret_699; \ +}) +#else +#define vqshrun_high_n_s64(__p0_700, __p1_700, __p2_700) __extension__ ({ \ + int32x4_t __ret_700; \ + int32x2_t __s0_700 = __p0_700; \ + int64x2_t __s1_700 = __p1_700; \ + int32x2_t __rev0_700; __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \ + int64x2_t __rev1_700; __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \ + __ret_700 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_700), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_700, __p2_700)))); \ + __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 3, 2, 1, 0); \ + __ret_700; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s16(__p0_701, __p1_701, __p2_701) __extension__ ({ \ + int8x16_t __ret_701; \ + int8x8_t __s0_701 = __p0_701; \ + int16x8_t __s1_701 = __p1_701; \ + __ret_701 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_701), (int8x8_t)(vqshrun_n_s16(__s1_701, __p2_701)))); \ + __ret_701; \ +}) +#else 
+#define vqshrun_high_n_s16(__p0_702, __p1_702, __p2_702) __extension__ ({ \ + int8x16_t __ret_702; \ + int8x8_t __s0_702 = __p0_702; \ + int16x8_t __s1_702 = __p1_702; \ + int8x8_t __rev0_702; __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_702; __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_702 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_702), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_702, __p2_702)))); \ + __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_702; \ +}) +#endif + +#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t 
__p1) { + poly8x8_t __ret; + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 
9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x4_t __rev0; + 
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); 
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = 
__builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, 
__ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t 
__ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + 
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrecpe_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = 
(float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); + return __ret; +} +__ai float64_t vrecped_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); + return __ret; +} +__ai float32_t vrecpes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); + return __ret; +} +__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); + return __ret; +} +__ai float64_t vrecpxd_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); + return __ret; +} +__ai float32_t vrecpxs_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t 
__p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = 
(poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai 
poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai 
uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t 
vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t 
vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai 
float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + 
__ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t 
vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} 
+__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t 
vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + 
return __ret; +} +__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t 
__ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t 
__ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); + return __ret; +} +__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); + return __ret; +} +#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u32(__p0_703, __p1_703, __p2_703) __extension__ ({ \ + uint16x8_t __ret_703; \ + uint16x4_t __s0_703 = __p0_703; \ + uint32x4_t __s1_703 = __p1_703; \ + __ret_703 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_703), (uint16x4_t)(vrshrn_n_u32(__s1_703, __p2_703)))); \ + __ret_703; \ +}) +#else +#define vrshrn_high_n_u32(__p0_704, __p1_704, __p2_704) __extension__ ({ \ + uint16x8_t __ret_704; \ + uint16x4_t __s0_704 = __p0_704; \ + uint32x4_t __s1_704 = __p1_704; \ + uint16x4_t __rev0_704; __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 3, 2, 1, 0); \ + uint32x4_t __rev1_704; __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \ + __ret_704 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_704), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_704, __p2_704)))); \ + __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_704; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u64(__p0_705, __p1_705, __p2_705) __extension__ ({ \ + uint32x4_t __ret_705; \ + uint32x2_t __s0_705 = __p0_705; \ + uint64x2_t __s1_705 = __p1_705; \ + __ret_705 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_705), (uint32x2_t)(vrshrn_n_u64(__s1_705, __p2_705)))); \ + __ret_705; \ +}) +#else +#define vrshrn_high_n_u64(__p0_706, __p1_706, __p2_706) __extension__ ({ \ + uint32x4_t __ret_706; \ + uint32x2_t __s0_706 = __p0_706; \ + uint64x2_t __s1_706 = __p1_706; \ + uint32x2_t __rev0_706; __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \ + uint64x2_t __rev1_706; __rev1_706 = 
__builtin_shufflevector(__s1_706, __s1_706, 1, 0); \ + __ret_706 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_706), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_706, __p2_706)))); \ + __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \ + __ret_706; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u16(__p0_707, __p1_707, __p2_707) __extension__ ({ \ + uint8x16_t __ret_707; \ + uint8x8_t __s0_707 = __p0_707; \ + uint16x8_t __s1_707 = __p1_707; \ + __ret_707 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_707), (uint8x8_t)(vrshrn_n_u16(__s1_707, __p2_707)))); \ + __ret_707; \ +}) +#else +#define vrshrn_high_n_u16(__p0_708, __p1_708, __p2_708) __extension__ ({ \ + uint8x16_t __ret_708; \ + uint8x8_t __s0_708 = __p0_708; \ + uint16x8_t __s1_708 = __p1_708; \ + uint8x8_t __rev0_708; __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_708; __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_708 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_708), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_708, __p2_708)))); \ + __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_708; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s32(__p0_709, __p1_709, __p2_709) __extension__ ({ \ + int16x8_t __ret_709; \ + int16x4_t __s0_709 = __p0_709; \ + int32x4_t __s1_709 = __p1_709; \ + __ret_709 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_709), (int16x4_t)(vrshrn_n_s32(__s1_709, __p2_709)))); \ + __ret_709; \ +}) +#else +#define vrshrn_high_n_s32(__p0_710, __p1_710, __p2_710) __extension__ ({ \ + int16x8_t __ret_710; \ + int16x4_t __s0_710 = __p0_710; \ + int32x4_t __s1_710 = __p1_710; \ + int16x4_t __rev0_710; __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \ + int32x4_t __rev1_710; __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \ + __ret_710 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_710), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_710, __p2_710)))); \ + __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_710; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s64(__p0_711, __p1_711, __p2_711) __extension__ ({ \ + int32x4_t __ret_711; \ + int32x2_t __s0_711 = __p0_711; \ + int64x2_t __s1_711 = __p1_711; \ + __ret_711 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_711), (int32x2_t)(vrshrn_n_s64(__s1_711, __p2_711)))); \ + __ret_711; \ +}) +#else +#define vrshrn_high_n_s64(__p0_712, __p1_712, __p2_712) __extension__ ({ \ + int32x4_t __ret_712; \ + int32x2_t __s0_712 = __p0_712; \ + int64x2_t __s1_712 = __p1_712; \ + int32x2_t __rev0_712; __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \ + int64x2_t __rev1_712; __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 1, 0); \ + __ret_712 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_712), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_712, __p2_712)))); \ + __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 3, 2, 1, 0); \ + __ret_712; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s16(__p0_713, __p1_713, __p2_713) __extension__ ({ \ + int8x16_t __ret_713; \ + int8x8_t __s0_713 = __p0_713; \ + int16x8_t __s1_713 = __p1_713; \ + __ret_713 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_713), (int8x8_t)(vrshrn_n_s16(__s1_713, __p2_713)))); \ + __ret_713; \ +}) +#else +#define vrshrn_high_n_s16(__p0_714, 
__p1_714, __p2_714) __extension__ ({ \ + int8x16_t __ret_714; \ + int8x8_t __s0_714 = __p0_714; \ + int16x8_t __s1_714 = __p1_714; \ + int8x8_t __rev0_714; __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_714; __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_714 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_714), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_714, __p2_714)))); \ + __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_714; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); + return __ret; +} +__ai float64_t vrsqrted_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); + return __ret; +} +__ai float32_t vrsqrtes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); + return __ret; +} +__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); + return __ret; +} +#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, 
int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \ + __ret; \ +}) +__ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); + return __ret; +} +__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); + return __ret; +} +#define vshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u8(__p0_715, __p1_715) __extension__ ({ \ + 
uint16x8_t __ret_715; \ + uint8x16_t __s0_715 = __p0_715; \ + __ret_715 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_715), __p1_715)); \ + __ret_715; \ +}) +#else +#define vshll_high_n_u8(__p0_716, __p1_716) __extension__ ({ \ + uint16x8_t __ret_716; \ + uint8x16_t __s0_716 = __p0_716; \ + uint8x16_t __rev0_716; __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_716 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_716), __p1_716)); \ + __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_716; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u32(__p0_717, __p1_717) __extension__ ({ \ + uint64x2_t __ret_717; \ + uint32x4_t __s0_717 = __p0_717; \ + __ret_717 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_717), __p1_717)); \ + __ret_717; \ +}) +#else +#define vshll_high_n_u32(__p0_718, __p1_718) __extension__ ({ \ + uint64x2_t __ret_718; \ + uint32x4_t __s0_718 = __p0_718; \ + uint32x4_t __rev0_718; __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \ + __ret_718 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_718), __p1_718)); \ + __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 1, 0); \ + __ret_718; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u16(__p0_719, __p1_719) __extension__ ({ \ + uint32x4_t __ret_719; \ + uint16x8_t __s0_719 = __p0_719; \ + __ret_719 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_719), __p1_719)); \ + __ret_719; \ +}) +#else +#define vshll_high_n_u16(__p0_720, __p1_720) __extension__ ({ \ + uint32x4_t __ret_720; \ + uint16x8_t __s0_720 = __p0_720; \ + uint16x8_t __rev0_720; __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_720 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_720), __p1_720)); \ + __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 3, 2, 1, 0); \ + __ret_720; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s8(__p0_721, __p1_721) __extension__ ({ \ + int16x8_t __ret_721; \ + int8x16_t __s0_721 = __p0_721; \ + __ret_721 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_721), __p1_721)); \ + __ret_721; \ +}) +#else +#define vshll_high_n_s8(__p0_722, __p1_722) __extension__ ({ \ + int16x8_t __ret_722; \ + int8x16_t __s0_722 = __p0_722; \ + int8x16_t __rev0_722; __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_722 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_722), __p1_722)); \ + __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_722; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s32(__p0_723, __p1_723) __extension__ ({ \ + int64x2_t __ret_723; \ + int32x4_t __s0_723 = __p0_723; \ + __ret_723 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_723), __p1_723)); \ + __ret_723; \ +}) +#else +#define vshll_high_n_s32(__p0_724, __p1_724) __extension__ ({ \ + int64x2_t __ret_724; \ + int32x4_t __s0_724 = __p0_724; \ + int32x4_t __rev0_724; __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 3, 2, 1, 0); \ + __ret_724 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_724), __p1_724)); \ + __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \ + __ret_724; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s16(__p0_725, __p1_725) __extension__ ({ \ + int32x4_t __ret_725; \ + int16x8_t __s0_725 = __p0_725; 
\ + __ret_725 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_725), __p1_725)); \ + __ret_725; \ +}) +#else +#define vshll_high_n_s16(__p0_726, __p1_726) __extension__ ({ \ + int32x4_t __ret_726; \ + int16x8_t __s0_726 = __p0_726; \ + int16x8_t __rev0_726; __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_726 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_726), __p1_726)); \ + __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \ + __ret_726; \ +}) +#endif + +#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u32(__p0_727, __p1_727, __p2_727) __extension__ ({ \ + uint16x8_t __ret_727; \ + uint16x4_t __s0_727 = __p0_727; \ + uint32x4_t __s1_727 = __p1_727; \ + __ret_727 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_727), (uint16x4_t)(vshrn_n_u32(__s1_727, __p2_727)))); \ + __ret_727; \ +}) +#else +#define vshrn_high_n_u32(__p0_728, __p1_728, __p2_728) __extension__ ({ \ + uint16x8_t __ret_728; \ + uint16x4_t __s0_728 = __p0_728; \ + uint32x4_t __s1_728 = __p1_728; \ + uint16x4_t __rev0_728; __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \ + uint32x4_t __rev1_728; __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \ + __ret_728 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_728), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_728, __p2_728)))); \ + __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_728; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u64(__p0_729, __p1_729, __p2_729) __extension__ ({ \ + uint32x4_t __ret_729; \ + uint32x2_t __s0_729 = __p0_729; \ + uint64x2_t __s1_729 = __p1_729; \ + __ret_729 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_729), (uint32x2_t)(vshrn_n_u64(__s1_729, __p2_729)))); \ + __ret_729; \ +}) +#else +#define vshrn_high_n_u64(__p0_730, __p1_730, __p2_730) __extension__ ({ \ + uint32x4_t __ret_730; \ + uint32x2_t __s0_730 = __p0_730; \ + uint64x2_t __s1_730 = __p1_730; \ + uint32x2_t __rev0_730; __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 1, 0); \ + uint64x2_t __rev1_730; __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 1, 0); \ + __ret_730 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_730), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_730, __p2_730)))); \ + __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \ + __ret_730; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u16(__p0_731, __p1_731, __p2_731) __extension__ ({ \ + uint8x16_t __ret_731; \ + uint8x8_t __s0_731 = __p0_731; \ + uint16x8_t __s1_731 = __p1_731; \ + __ret_731 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_731), (uint8x8_t)(vshrn_n_u16(__s1_731, __p2_731)))); \ + __ret_731; \ +}) +#else +#define vshrn_high_n_u16(__p0_732, __p1_732, __p2_732) __extension__ ({ \ + uint8x16_t __ret_732; \ + uint8x8_t __s0_732 = __p0_732; \ + uint16x8_t __s1_732 = __p1_732; \ + uint8x8_t __rev0_732; __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_732; __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_732 = 
(uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_732), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_732, __p2_732)))); \ + __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_732; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s32(__p0_733, __p1_733, __p2_733) __extension__ ({ \ + int16x8_t __ret_733; \ + int16x4_t __s0_733 = __p0_733; \ + int32x4_t __s1_733 = __p1_733; \ + __ret_733 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_733), (int16x4_t)(vshrn_n_s32(__s1_733, __p2_733)))); \ + __ret_733; \ +}) +#else +#define vshrn_high_n_s32(__p0_734, __p1_734, __p2_734) __extension__ ({ \ + int16x8_t __ret_734; \ + int16x4_t __s0_734 = __p0_734; \ + int32x4_t __s1_734 = __p1_734; \ + int16x4_t __rev0_734; __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \ + int32x4_t __rev1_734; __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \ + __ret_734 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_734), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_734, __p2_734)))); \ + __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_734; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s64(__p0_735, __p1_735, __p2_735) __extension__ ({ \ + int32x4_t __ret_735; \ + int32x2_t __s0_735 = __p0_735; \ + int64x2_t __s1_735 = __p1_735; \ + __ret_735 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_735), (int32x2_t)(vshrn_n_s64(__s1_735, __p2_735)))); \ + __ret_735; \ +}) +#else +#define vshrn_high_n_s64(__p0_736, __p1_736, __p2_736) __extension__ ({ \ + int32x4_t __ret_736; \ + int32x2_t __s0_736 = __p0_736; \ + int64x2_t __s1_736 = __p1_736; \ + int32x2_t __rev0_736; __rev0_736 = __builtin_shufflevector(__s0_736, __s0_736, 1, 0); \ + int64x2_t __rev1_736; __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \ + __ret_736 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_736), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_736, __p2_736)))); \ + __ret_736 = __builtin_shufflevector(__ret_736, __ret_736, 3, 2, 1, 0); \ + __ret_736; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \ + int8x16_t __ret_737; \ + int8x8_t __s0_737 = __p0_737; \ + int16x8_t __s1_737 = __p1_737; \ + __ret_737 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_737), (int8x8_t)(vshrn_n_s16(__s1_737, __p2_737)))); \ + __ret_737; \ +}) +#else +#define vshrn_high_n_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \ + int8x16_t __ret_738; \ + int8x8_t __s0_738 = __p0_738; \ + int16x8_t __s1_738 = __p1_738; \ + int8x8_t __rev0_738; __rev0_738 = __builtin_shufflevector(__s0_738, __s0_738, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_738; __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_738 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_738), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_738, __p2_738)))); \ + __ret_738 = __builtin_shufflevector(__ret_738, __ret_738, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_738; \ +}) +#endif + +#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) 
+#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); + 
return __ret; +} +#else +__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vsqrt_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vst1_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ +}) +#else +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ +}) +#else +#define vst1q_f64(__p0, __p1) __extension__ 
({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ +}) +#endif + +#define vst1_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ +}) +#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ +}) +#else +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ +}) +#else +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ +}) +#endif + +#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) +#else +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ +}) +#endif + +#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) +#else +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) +#endif + +#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) +#else +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) +#endif + +#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst2_p64(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#ifdef 
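/*
 * Illustrative usage sketch for the multi-register vst1 _x2/_x3/_x4
 * stores defined above: they write two to four NEON registers to
 * consecutive memory without interleaving. Minimal standalone example
 * assuming AArch64; the function and variable names are hypothetical.
 */
#include <arm_neon.h>

static inline void copy4_f64(const double *src, double *dst)
{
    float64x2x2_t pair;
    pair.val[0] = vld1q_f64(src);     /* elements 0..1                 */
    pair.val[1] = vld1q_f64(src + 2); /* elements 2..3                 */
    vst1q_f64_x2(dst, pair);          /* contiguous 4-element store    */
}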
__LITTLE_ENDIAN__ +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) +#else +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +}) +#else +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +}) +#endif + +#define vst2_f64(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ +}) +#else +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ +}) +#else +#define 
vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ +}) +#else +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ +}) +#else +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ +}) +#else +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ +}) +#else +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ +}) +#else +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ +}) +#endif + +#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ +}) +#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ +}) +#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ +}) +#define vst3_p64(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) +#else +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +}) +#else +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
(int8x16_t)__rev1.val[2], 35); \ +}) +#endif + +#define vst3_f64(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ +}) +#else +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ +}) +#else +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ +}) +#else +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ +}) +#else +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + 
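/*
 * Illustrative usage sketch for the vst2 and vst3 interleaving stores
 * defined above: vst2q_f64 writes val[0] and val[1] element-interleaved
 * in memory (re0, im0, re1, im1). Minimal standalone example assuming
 * AArch64; the function and variable names are hypothetical.
 */
#include <arm_neon.h>

static inline void pack_complex2_f64(float64x2_t re, float64x2_t im,
                                     double out[4])
{
    float64x2x2_t z;
    z.val[0] = re;                    /* real parts                    */
    z.val[1] = im;                    /* imaginary parts               */
    vst2q_f64(out, z);                /* -> re0 im0 re1 im1            */
}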
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ +}) +#else +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ +}) +#else +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ +}) +#else +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ +}) +#endif + +#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ +}) +#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ +}) +#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ +}) +#define vst4_p64(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 
(int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) +#else +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) +#else +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) +#else +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) +#endif + +#define vst4_f64(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define 
vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ +}) +#else +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ +}) +#else +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ +}) +#else +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ +}) +#else +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); 
\ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ +}) +#else +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ +}) +#else +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ +}) +#else +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ +}) +#endif + +#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ +}) +#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ +}) +#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ 
+ int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ +}) +#define vstrq_p128(__p0, __p1) __extension__ ({ \ + poly128_t __s1 = __p1; \ + __builtin_neon_vstrq_p128(__p0, __s1); \ +}) +__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); + return __ret; +} +__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t 
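/*
 * Illustrative usage sketch for the vsubhn_high narrowing subtracts
 * defined above: they take the high half of each (a - b) difference and
 * pack it into the upper lanes of an existing narrow vector. Minimal
 * standalone example assuming AArch64; names are hypothetical.
 */
#include <arm_neon.h>

static inline uint16x8_t diff_high_halves_u32(uint16x4_t low,
                                              uint32x4_t a, uint32x4_t b)
{
    /* lanes 0..3 keep 'low'; lanes 4..7 receive (a - b) >> 16 */
    return vsubhn_high_u32(low, a, b);
}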
__p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = 
vmovl_high_u16(__p0) - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
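/*
 * Illustrative usage sketch for the vsubl_high and vsubw_high widening
 * subtracts defined above: they operate on the upper half of a 128-bit
 * vector and produce a full-width result without a separate vget_high
 * step. Minimal standalone example assuming AArch64; the function name
 * is hypothetical.
 */
#include <arm_neon.h>

static inline int16x8_t delta_high_s8(int8x16_t a, int8x16_t b)
{
    /* equivalent to vsubl_s8(vget_high_s8(a), vget_high_s8(b)) */
    return vsubl_high_s8(a, b);
}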
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + 
uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
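/*
 * Illustrative usage sketch for the vtrn1 intrinsics defined above:
 * vtrn1 interleaves the even-numbered lanes of two vectors (the TRN1
 * instruction), while vtrn2 below does the same for the odd lanes.
 * Minimal standalone example assuming AArch64; the function name is
 * hypothetical.
 */
#include <arm_neon.h>

static inline float32x4_t even_lanes_f32(float32x4_t a, float32x4_t b)
{
    /* result lanes: a0, b0, a2, b2 */
    return vtrn1q_f32(a, b);
}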
7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif 
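
The definitions above implement the AArch64 `vtrn1*`/`vtrn2*` transpose intrinsics as thin wrappers around `__builtin_shufflevector`, with the big-endian variants reversing lane order before and after the shuffle. As an illustrative aside (not part of the patch), the following minimal sketch shows how two of these intrinsics behave when the header is used from application code; the test values, array names and `main` harness are hypothetical, and it assumes an AArch64 toolchain whose `<arm_neon.h>` provides these definitions:

```
/* Illustrative only: exercises vtrn1_u16/vtrn2_u16 as defined above.
 * Build with an AArch64 compiler (e.g. GCC or clang targeting aarch64)
 * so that <arm_neon.h> supplies these intrinsics. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    const uint16_t av[4] = {0, 1, 2, 3};      /* lanes a0..a3 */
    const uint16_t bv[4] = {10, 11, 12, 13};  /* lanes b0..b3 */
    uint16x4_t a = vld1_u16(av);
    uint16x4_t b = vld1_u16(bv);

    /* vtrn1 gathers the even lanes of both inputs: {a0, b0, a2, b2} */
    uint16x4_t even = vtrn1_u16(a, b);
    /* vtrn2 gathers the odd lanes of both inputs:  {a1, b1, a3, b3} */
    uint16x4_t odd  = vtrn2_u16(a, b);

    uint16_t e[4], o[4];
    vst1_u16(e, even);
    vst1_u16(o, odd);
    for (int i = 0; i < 4; i++)
        printf("even[%d]=%u odd[%d]=%u\n", i, (unsigned)e[i], i, (unsigned)o[i]);
    /* Expected: even = {0, 10, 2, 12}, odd = {1, 11, 3, 13} */
    return 0;
}
```

The `vuzp1*`/`vuzp2*` (unzip) and `vzip1*`/`vzip2*` (interleave) definitions later in this hunk follow the same wrapper pattern, differing only in their shuffle index lists.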
+ +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + 
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = 
(uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1); + return __ret; +} +__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); + return __ret; +} +__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); + return __ret; +} +__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 
6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t 
vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai int16x8_t 
vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, 
__p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else 
+__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t 
vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t 
vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("aes"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); + return __ret; +} +#else +__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__rev0, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_bf16(__p0_739, __p1_739, __p2_739, __p3_739) __extension__ ({ \ + bfloat16x8_t __ret_739; \ + bfloat16x8_t __s0_739 = __p0_739; \ + bfloat16x4_t __s2_739 = __p2_739; \ + __ret_739 = vsetq_lane_bf16(vget_lane_bf16(__s2_739, __p3_739), __s0_739, __p1_739); \ + __ret_739; \ +}) +#else +#define vcopyq_lane_bf16(__p0_740, __p1_740, __p2_740, __p3_740) __extension__ ({ \ + bfloat16x8_t __ret_740; \ + bfloat16x8_t __s0_740 = __p0_740; \ + bfloat16x4_t __s2_740 = __p2_740; \ + bfloat16x8_t __rev0_740; __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_740; __rev2_740 = __builtin_shufflevector(__s2_740, __s2_740, 3, 2, 1, 0); \ + __ret_740 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_740, __p3_740), __rev0_740, __p1_740); \ + __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_740; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_bf16(__p0_741, __p1_741, __p2_741, __p3_741) __extension__ ({ \ + bfloat16x4_t __ret_741; \ + bfloat16x4_t __s0_741 = __p0_741; \ + bfloat16x4_t __s2_741 = __p2_741; \ + __ret_741 = vset_lane_bf16(vget_lane_bf16(__s2_741, __p3_741), __s0_741, __p1_741); \ + __ret_741; \ +}) +#else +#define 
vcopy_lane_bf16(__p0_742, __p1_742, __p2_742, __p3_742) __extension__ ({ \ + bfloat16x4_t __ret_742; \ + bfloat16x4_t __s0_742 = __p0_742; \ + bfloat16x4_t __s2_742 = __p2_742; \ + bfloat16x4_t __rev0_742; __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_742; __rev2_742 = __builtin_shufflevector(__s2_742, __s2_742, 3, 2, 1, 0); \ + __ret_742 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_742, __p3_742), __rev0_742, __p1_742); \ + __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 3, 2, 1, 0); \ + __ret_742; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_bf16(__p0_743, __p1_743, __p2_743, __p3_743) __extension__ ({ \ + bfloat16x8_t __ret_743; \ + bfloat16x8_t __s0_743 = __p0_743; \ + bfloat16x8_t __s2_743 = __p2_743; \ + __ret_743 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_743, __p3_743), __s0_743, __p1_743); \ + __ret_743; \ +}) +#else +#define vcopyq_laneq_bf16(__p0_744, __p1_744, __p2_744, __p3_744) __extension__ ({ \ + bfloat16x8_t __ret_744; \ + bfloat16x8_t __s0_744 = __p0_744; \ + bfloat16x8_t __s2_744 = __p2_744; \ + bfloat16x8_t __rev0_744; __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_744; __rev2_744 = __builtin_shufflevector(__s2_744, __s2_744, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_744 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_744, __p3_744), __rev0_744, __p1_744); \ + __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_744; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_bf16(__p0_745, __p1_745, __p2_745, __p3_745) __extension__ ({ \ + bfloat16x4_t __ret_745; \ + bfloat16x4_t __s0_745 = __p0_745; \ + bfloat16x8_t __s2_745 = __p2_745; \ + __ret_745 = vset_lane_bf16(vgetq_lane_bf16(__s2_745, __p3_745), __s0_745, __p1_745); \ + __ret_745; \ +}) +#else +#define vcopy_laneq_bf16(__p0_746, __p1_746, __p2_746, __p3_746) __extension__ ({ \ + bfloat16x4_t __ret_746; \ + bfloat16x4_t __s0_746 = __p0_746; \ + bfloat16x8_t __s2_746 = __p2_746; \ + bfloat16x4_t __rev0_746; __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_746; __rev2_746 = __builtin_shufflevector(__s2_746, __s2_746, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_746 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_746, __p3_746), __rev0_746, __p1_746); \ + __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \ + __ret_746; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__p0, (int8x16_t)__p1, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = __a64_vcvtq_low_bf16_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + 
int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t 
vreinterpretq_bf16_f64(float64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_u32(__p0_747, __p1_747, __p2_747, __p3_747) __extension__ ({ \ + uint32x4_t __ret_747; \ + uint32x4_t __s0_747 = __p0_747; \ + uint8x16_t __s1_747 = __p1_747; \ + uint8x16_t __s2_747 = __p2_747; \ +uint8x16_t __reint_747 = __s2_747; \ +uint32x4_t __reint1_747 
= splatq_laneq_u32(*(uint32x4_t *) &__reint_747, __p3_747); \ + __ret_747 = vdotq_u32(__s0_747, __s1_747, *(uint8x16_t *) &__reint1_747); \ + __ret_747; \ +}) +#else +#define vdotq_laneq_u32(__p0_748, __p1_748, __p2_748, __p3_748) __extension__ ({ \ + uint32x4_t __ret_748; \ + uint32x4_t __s0_748 = __p0_748; \ + uint8x16_t __s1_748 = __p1_748; \ + uint8x16_t __s2_748 = __p2_748; \ + uint32x4_t __rev0_748; __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \ + uint8x16_t __rev1_748; __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_748; __rev2_748 = __builtin_shufflevector(__s2_748, __s2_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_748 = __rev2_748; \ +uint32x4_t __reint1_748 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_748, __p3_748); \ + __ret_748 = __noswap_vdotq_u32(__rev0_748, __rev1_748, *(uint8x16_t *) &__reint1_748); \ + __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 3, 2, 1, 0); \ + __ret_748; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_s32(__p0_749, __p1_749, __p2_749, __p3_749) __extension__ ({ \ + int32x4_t __ret_749; \ + int32x4_t __s0_749 = __p0_749; \ + int8x16_t __s1_749 = __p1_749; \ + int8x16_t __s2_749 = __p2_749; \ +int8x16_t __reint_749 = __s2_749; \ +int32x4_t __reint1_749 = splatq_laneq_s32(*(int32x4_t *) &__reint_749, __p3_749); \ + __ret_749 = vdotq_s32(__s0_749, __s1_749, *(int8x16_t *) &__reint1_749); \ + __ret_749; \ +}) +#else +#define vdotq_laneq_s32(__p0_750, __p1_750, __p2_750, __p3_750) __extension__ ({ \ + int32x4_t __ret_750; \ + int32x4_t __s0_750 = __p0_750; \ + int8x16_t __s1_750 = __p1_750; \ + int8x16_t __s2_750 = __p2_750; \ + int32x4_t __rev0_750; __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 3, 2, 1, 0); \ + int8x16_t __rev1_750; __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_750; __rev2_750 = __builtin_shufflevector(__s2_750, __s2_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_750 = __rev2_750; \ +int32x4_t __reint1_750 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_750, __p3_750); \ + __ret_750 = __noswap_vdotq_s32(__rev0_750, __rev1_750, *(int8x16_t *) &__reint1_750); \ + __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \ + __ret_750; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_u32(__p0_751, __p1_751, __p2_751, __p3_751) __extension__ ({ \ + uint32x2_t __ret_751; \ + uint32x2_t __s0_751 = __p0_751; \ + uint8x8_t __s1_751 = __p1_751; \ + uint8x16_t __s2_751 = __p2_751; \ +uint8x16_t __reint_751 = __s2_751; \ +uint32x2_t __reint1_751 = splat_laneq_u32(*(uint32x4_t *) &__reint_751, __p3_751); \ + __ret_751 = vdot_u32(__s0_751, __s1_751, *(uint8x8_t *) &__reint1_751); \ + __ret_751; \ +}) +#else +#define vdot_laneq_u32(__p0_752, __p1_752, __p2_752, __p3_752) __extension__ ({ \ + uint32x2_t __ret_752; \ + uint32x2_t __s0_752 = __p0_752; \ + uint8x8_t __s1_752 = __p1_752; \ + uint8x16_t __s2_752 = __p2_752; \ + uint32x2_t __rev0_752; __rev0_752 = __builtin_shufflevector(__s0_752, __s0_752, 1, 0); \ + uint8x8_t __rev1_752; __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_752; __rev2_752 = __builtin_shufflevector(__s2_752, __s2_752, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_752 = __rev2_752; \ +uint32x2_t __reint1_752 
= __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_752, __p3_752); \ + __ret_752 = __noswap_vdot_u32(__rev0_752, __rev1_752, *(uint8x8_t *) &__reint1_752); \ + __ret_752 = __builtin_shufflevector(__ret_752, __ret_752, 1, 0); \ + __ret_752; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_s32(__p0_753, __p1_753, __p2_753, __p3_753) __extension__ ({ \ + int32x2_t __ret_753; \ + int32x2_t __s0_753 = __p0_753; \ + int8x8_t __s1_753 = __p1_753; \ + int8x16_t __s2_753 = __p2_753; \ +int8x16_t __reint_753 = __s2_753; \ +int32x2_t __reint1_753 = splat_laneq_s32(*(int32x4_t *) &__reint_753, __p3_753); \ + __ret_753 = vdot_s32(__s0_753, __s1_753, *(int8x8_t *) &__reint1_753); \ + __ret_753; \ +}) +#else +#define vdot_laneq_s32(__p0_754, __p1_754, __p2_754, __p3_754) __extension__ ({ \ + int32x2_t __ret_754; \ + int32x2_t __s0_754 = __p0_754; \ + int8x8_t __s1_754 = __p1_754; \ + int8x16_t __s2_754 = __p2_754; \ + int32x2_t __rev0_754; __rev0_754 = __builtin_shufflevector(__s0_754, __s0_754, 1, 0); \ + int8x8_t __rev1_754; __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_754; __rev2_754 = __builtin_shufflevector(__s2_754, __s2_754, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_754 = __rev2_754; \ +int32x2_t __reint1_754 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_754, __p3_754); \ + __ret_754 = __noswap_vdot_s32(__rev0_754, __rev1_754, *(int8x8_t *) &__reint1_754); \ + __ret_754 = __builtin_shufflevector(__ret_754, __ret_754, 1, 0); \ + __ret_754; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) 
__builtin_neon_vfmlal_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = 
(float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = 
__p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ 
\ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = 
__noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_lane_f16(__p0_755, __p1_755, __p2_755, __p3_755) __extension__ ({ \ + float16_t __ret_755; \ + float16_t __s0_755 = __p0_755; \ + float16_t __s1_755 = __p1_755; \ + float16x4_t __s2_755 = __p2_755; \ + __ret_755 = vfmah_lane_f16(__s0_755, -__s1_755, __s2_755, __p3_755); \ + __ret_755; \ +}) +#else +#define vfmsh_lane_f16(__p0_756, __p1_756, __p2_756, __p3_756) __extension__ ({ \ + float16_t __ret_756; \ + float16_t __s0_756 = __p0_756; \ + float16_t __s1_756 = __p1_756; \ + float16x4_t __s2_756 = __p2_756; \ + float16x4_t __rev2_756; __rev2_756 = __builtin_shufflevector(__s2_756, __s2_756, 3, 2, 1, 0); \ + __ret_756 = __noswap_vfmah_lane_f16(__s0_756, -__s1_756, __rev2_756, __p3_756); \ + __ret_756; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f16(__p0_757, __p1_757, __p2_757, __p3_757) __extension__ ({ \ + float16x8_t __ret_757; \ + float16x8_t __s0_757 = __p0_757; \ + float16x8_t __s1_757 = __p1_757; \ + float16x4_t __s2_757 = __p2_757; \ + __ret_757 = vfmaq_lane_f16(__s0_757, -__s1_757, __s2_757, __p3_757); \ + __ret_757; \ +}) +#else +#define vfmsq_lane_f16(__p0_758, __p1_758, __p2_758, __p3_758) __extension__ ({ \ + float16x8_t __ret_758; \ + float16x8_t __s0_758 = __p0_758; \ + float16x8_t __s1_758 = __p1_758; \ + float16x4_t __s2_758 = __p2_758; \ + float16x8_t __rev0_758; __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_758; __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_758; __rev2_758 = __builtin_shufflevector(__s2_758, __s2_758, 3, 2, 1, 0); \ + __ret_758 = __noswap_vfmaq_lane_f16(__rev0_758, -__rev1_758, __rev2_758, __p3_758); \ + __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_758; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f16(__p0_759, __p1_759, __p2_759, __p3_759) __extension__ ({ \ + float16x4_t __ret_759; \ + float16x4_t __s0_759 = __p0_759; \ + float16x4_t __s1_759 = __p1_759; \ + float16x4_t __s2_759 = __p2_759; \ + __ret_759 = vfma_lane_f16(__s0_759, -__s1_759, __s2_759, __p3_759); \ + __ret_759; \ +}) +#else +#define vfms_lane_f16(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \ + float16x4_t __ret_760; \ + float16x4_t __s0_760 = __p0_760; \ + float16x4_t __s1_760 = __p1_760; \ + float16x4_t __s2_760 = __p2_760; \ + float16x4_t __rev0_760; __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 3, 2, 1, 0); \ + float16x4_t __rev1_760; __rev1_760 = 
__builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \ + float16x4_t __rev2_760; __rev2_760 = __builtin_shufflevector(__s2_760, __s2_760, 3, 2, 1, 0); \ + __ret_760 = __noswap_vfma_lane_f16(__rev0_760, -__rev1_760, __rev2_760, __p3_760); \ + __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 3, 2, 1, 0); \ + __ret_760; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_laneq_f16(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \ + float16_t __ret_761; \ + float16_t __s0_761 = __p0_761; \ + float16_t __s1_761 = __p1_761; \ + float16x8_t __s2_761 = __p2_761; \ + __ret_761 = vfmah_laneq_f16(__s0_761, -__s1_761, __s2_761, __p3_761); \ + __ret_761; \ +}) +#else +#define vfmsh_laneq_f16(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \ + float16_t __ret_762; \ + float16_t __s0_762 = __p0_762; \ + float16_t __s1_762 = __p1_762; \ + float16x8_t __s2_762 = __p2_762; \ + float16x8_t __rev2_762; __rev2_762 = __builtin_shufflevector(__s2_762, __s2_762, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_762 = __noswap_vfmah_laneq_f16(__s0_762, -__s1_762, __rev2_762, __p3_762); \ + __ret_762; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \ + float16x8_t __ret_763; \ + float16x8_t __s0_763 = __p0_763; \ + float16x8_t __s1_763 = __p1_763; \ + float16x8_t __s2_763 = __p2_763; \ + __ret_763 = vfmaq_laneq_f16(__s0_763, -__s1_763, __s2_763, __p3_763); \ + __ret_763; \ +}) +#else +#define vfmsq_laneq_f16(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \ + float16x8_t __ret_764; \ + float16x8_t __s0_764 = __p0_764; \ + float16x8_t __s1_764 = __p1_764; \ + float16x8_t __s2_764 = __p2_764; \ + float16x8_t __rev0_764; __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_764; __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_764; __rev2_764 = __builtin_shufflevector(__s2_764, __s2_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_764 = __noswap_vfmaq_laneq_f16(__rev0_764, -__rev1_764, __rev2_764, __p3_764); \ + __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_764; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f16(__p0_765, __p1_765, __p2_765, __p3_765) __extension__ ({ \ + float16x4_t __ret_765; \ + float16x4_t __s0_765 = __p0_765; \ + float16x4_t __s1_765 = __p1_765; \ + float16x8_t __s2_765 = __p2_765; \ + __ret_765 = vfma_laneq_f16(__s0_765, -__s1_765, __s2_765, __p3_765); \ + __ret_765; \ +}) +#else +#define vfms_laneq_f16(__p0_766, __p1_766, __p2_766, __p3_766) __extension__ ({ \ + float16x4_t __ret_766; \ + float16x4_t __s0_766 = __p0_766; \ + float16x4_t __s1_766 = __p1_766; \ + float16x8_t __s2_766 = __p2_766; \ + float16x4_t __rev0_766; __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 3, 2, 1, 0); \ + float16x4_t __rev1_766; __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \ + float16x8_t __rev2_766; __rev2_766 = __builtin_shufflevector(__s2_766, __s2_766, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_766 = __noswap_vfma_laneq_f16(__rev0_766, -__rev1_766, __rev2_766, __p3_766); \ + __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 3, 2, 1, 0); \ + __ret_766; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, 
__s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) 
__builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f16(__p0_767, __p1_767, __p2_767) __extension__ ({ \ + float16x8_t __ret_767; \ + float16x8_t __s0_767 = __p0_767; \ + float16x8_t __s1_767 = __p1_767; \ + __ret_767 = __s0_767 * splatq_laneq_f16(__s1_767, __p2_767); \ + __ret_767; \ +}) +#else +#define vmulq_laneq_f16(__p0_768, __p1_768, __p2_768) __extension__ ({ \ + float16x8_t __ret_768; \ + float16x8_t __s0_768 = __p0_768; \ + float16x8_t __s1_768 = __p1_768; \ + float16x8_t __rev0_768; __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_768; __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_768 = __rev0_768 * __noswap_splatq_laneq_f16(__rev1_768, __p2_768); \ + __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_768; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f16(__p0_769, __p1_769, __p2_769) __extension__ ({ \ + float16x4_t __ret_769; \ + float16x4_t __s0_769 = __p0_769; \ + float16x8_t __s1_769 = __p1_769; \ + __ret_769 = __s0_769 * splat_laneq_f16(__s1_769, __p2_769); \ + __ret_769; \ +}) +#else +#define vmul_laneq_f16(__p0_770, __p1_770, __p2_770) __extension__ ({ \ + float16x4_t __ret_770; \ + float16x4_t __s0_770 = __p0_770; \ + float16x8_t __s1_770 = __p1_770; \ + float16x4_t __rev0_770; __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 3, 2, 1, 0); \ + float16x8_t __rev1_770; __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_770 = __rev0_770 * __noswap_splat_laneq_f16(__rev1_770, __p2_770); \ + __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \ + __ret_770; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f16(__p0_771, __p1_771, __p2_771) __extension__ ({ \ + float16x8_t __ret_771; \ + float16x8_t __s0_771 = __p0_771; \ + float16x4_t __s1_771 = __p1_771; \ + __ret_771 = vmulxq_f16(__s0_771, splatq_lane_f16(__s1_771, __p2_771)); \ + __ret_771; \ +}) +#else +#define vmulxq_lane_f16(__p0_772, __p1_772, __p2_772) __extension__ ({ \ + float16x8_t __ret_772; \ + float16x8_t __s0_772 = __p0_772; \ + float16x4_t __s1_772 = __p1_772; \ + float16x8_t __rev0_772; __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_772; __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \ + __ret_772 = __noswap_vmulxq_f16(__rev0_772, __noswap_splatq_lane_f16(__rev1_772, __p2_772)); \ + __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_772; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f16(__p0_773, __p1_773, __p2_773) 
__extension__ ({ \ + float16x4_t __ret_773; \ + float16x4_t __s0_773 = __p0_773; \ + float16x4_t __s1_773 = __p1_773; \ + __ret_773 = vmulx_f16(__s0_773, splat_lane_f16(__s1_773, __p2_773)); \ + __ret_773; \ +}) +#else +#define vmulx_lane_f16(__p0_774, __p1_774, __p2_774) __extension__ ({ \ + float16x4_t __ret_774; \ + float16x4_t __s0_774 = __p0_774; \ + float16x4_t __s1_774 = __p1_774; \ + float16x4_t __rev0_774; __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \ + float16x4_t __rev1_774; __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \ + __ret_774 = __noswap_vmulx_f16(__rev0_774, __noswap_splat_lane_f16(__rev1_774, __p2_774)); \ + __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \ + __ret_774; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f16(__p0_775, __p1_775, __p2_775) __extension__ ({ \ + float16x8_t __ret_775; \ + float16x8_t __s0_775 = __p0_775; \ + float16x8_t __s1_775 = __p1_775; \ + __ret_775 = vmulxq_f16(__s0_775, splatq_laneq_f16(__s1_775, __p2_775)); \ + __ret_775; \ +}) +#else +#define vmulxq_laneq_f16(__p0_776, __p1_776, __p2_776) __extension__ ({ \ + float16x8_t __ret_776; \ + float16x8_t __s0_776 = __p0_776; \ + float16x8_t __s1_776 = __p1_776; \ + float16x8_t __rev0_776; __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_776; __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_776 = __noswap_vmulxq_f16(__rev0_776, __noswap_splatq_laneq_f16(__rev1_776, __p2_776)); \ + __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_776; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f16(__p0_777, __p1_777, __p2_777) __extension__ ({ \ + float16x4_t __ret_777; \ + float16x4_t __s0_777 = __p0_777; \ + float16x8_t __s1_777 = __p1_777; \ + __ret_777 = vmulx_f16(__s0_777, splat_laneq_f16(__s1_777, __p2_777)); \ + __ret_777; \ +}) +#else +#define vmulx_laneq_f16(__p0_778, __p1_778, __p2_778) __extension__ ({ \ + float16x4_t __ret_778; \ + float16x4_t __s0_778 = __p0_778; \ + float16x8_t __s1_778 = __p1_778; \ + float16x4_t __rev0_778; __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \ + float16x8_t __rev1_778; __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_778 = __noswap_vmulx_f16(__rev0_778, __noswap_splat_laneq_f16(__rev1_778, __p2_778)); \ + __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 3, 2, 1, 0); \ + __ret_778; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t 
__ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return 
__ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) 
__builtin_neon_vrndi_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 
0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_laneq_s32(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \ + int32x4_t __ret_779; \ + int32x4_t __s0_779 = __p0_779; \ + int8x16_t __s1_779 = __p1_779; \ + uint8x16_t __s2_779 = __p2_779; \ +uint8x16_t __reint_779 = __s2_779; \ + __ret_779 = vusdotq_s32(__s0_779, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_779, __p3_779)), __s1_779); \ + __ret_779; \ +}) +#else +#define vsudotq_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \ + int32x4_t __ret_780; \ + int32x4_t __s0_780 = __p0_780; \ + int8x16_t __s1_780 = 
__p1_780; \ + uint8x16_t __s2_780 = __p2_780; \ + int32x4_t __rev0_780; __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \ + int8x16_t __rev1_780; __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_780; __rev2_780 = __builtin_shufflevector(__s2_780, __s2_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_780 = __rev2_780; \ + __ret_780 = __noswap_vusdotq_s32(__rev0_780, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_780, __p3_780)), __rev1_780); \ + __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \ + __ret_780; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \ + int32x2_t __ret_781; \ + int32x2_t __s0_781 = __p0_781; \ + int8x8_t __s1_781 = __p1_781; \ + uint8x16_t __s2_781 = __p2_781; \ +uint8x16_t __reint_781 = __s2_781; \ + __ret_781 = vusdot_s32(__s0_781, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_781, __p3_781)), __s1_781); \ + __ret_781; \ +}) +#else +#define vsudot_laneq_s32(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \ + int32x2_t __ret_782; \ + int32x2_t __s0_782 = __p0_782; \ + int8x8_t __s1_782 = __p1_782; \ + uint8x16_t __s2_782 = __p2_782; \ + int32x2_t __rev0_782; __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \ + int8x8_t __rev1_782; __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_782; __rev2_782 = __builtin_shufflevector(__s2_782, __s2_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_782 = __rev2_782; \ + __ret_782 = __noswap_vusdot_s32(__rev0_782, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_782, __p3_782)), __rev1_782); \ + __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 1, 0); \ + __ret_782; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_laneq_s32(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \ + int32x4_t __ret_783; \ + int32x4_t __s0_783 = __p0_783; \ + uint8x16_t __s1_783 = __p1_783; \ + int8x16_t __s2_783 = __p2_783; \ +int8x16_t __reint_783 = __s2_783; \ + __ret_783 = vusdotq_s32(__s0_783, __s1_783, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_783, __p3_783))); \ + __ret_783; \ +}) +#else +#define vusdotq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \ + int32x4_t __ret_784; \ + int32x4_t __s0_784 = __p0_784; \ + uint8x16_t __s1_784 = __p1_784; \ + int8x16_t __s2_784 = __p2_784; \ + int32x4_t __rev0_784; __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \ + uint8x16_t __rev1_784; __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_784; __rev2_784 = __builtin_shufflevector(__s2_784, __s2_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_784 = __rev2_784; \ + __ret_784 = __noswap_vusdotq_s32(__rev0_784, __rev1_784, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_784, __p3_784))); \ + __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 3, 2, 1, 0); \ + __ret_784; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_laneq_s32(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \ + int32x2_t __ret_785; \ + int32x2_t __s0_785 = __p0_785; \ + uint8x8_t __s1_785 = __p1_785; \ + int8x16_t __s2_785 = __p2_785; \ +int8x16_t __reint_785 = __s2_785; \ + __ret_785 = 
vusdot_s32(__s0_785, __s1_785, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_785, __p3_785))); \ + __ret_785; \ +}) +#else +#define vusdot_laneq_s32(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \ + int32x2_t __ret_786; \ + int32x2_t __s0_786 = __p0_786; \ + uint8x8_t __s1_786 = __p1_786; \ + int8x16_t __s2_786 = __p2_786; \ + int32x2_t __rev0_786; __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 1, 0); \ + uint8x8_t __rev1_786; __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_786; __rev2_786 = __builtin_shufflevector(__s2_786, __s2_786, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_786 = __rev2_786; \ + __ret_786 = __noswap_vusdot_s32(__rev0_786, __rev1_786, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_786, __p3_786))); \ + __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 1, 0); \ + __ret_786; \ +}) +#endif + +#define vldap1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vldap1_lane_p64(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vldap1q_lane_p64(__p0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vldap1q_lane_p64(__p0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vldap1q_lane_u64(__p0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vldap1q_lane_u64(__p0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vldap1q_lane_f64(__p0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vldap1q_lane_f64(__p0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vldap1q_lane_s64(__p0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vldap1q_lane_s64(__p0, (int8x16_t)__rev1, __p2, 35); \ + __ret 
= __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vldap1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vldap1_lane_u64(__p0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#define vldap1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vldap1_lane_f64(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#define vldap1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vldap1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#define vstl1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_p64(__p0, (int8x8_t)__s1, __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_p64(__p0, (int8x16_t)__s1, __p2, 38); \ +}) +#else +#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_p64(__p0, (int8x16_t)__rev1, __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_u64(__p0, (int8x16_t)__s1, __p2, 51); \ +}) +#else +#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_u64(__p0, (int8x16_t)__rev1, __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_f64(__p0, (int8x16_t)__s1, __p2, 42); \ +}) +#else +#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_f64(__p0, (int8x16_t)__rev1, __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_s64(__p0, (int8x16_t)__s1, __p2, 35); \ +}) +#else +#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_s64(__p0, (int8x16_t)__rev1, __p2, 35); \ +}) +#endif + +#define vstl1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_u64(__p0, (int8x8_t)__s1, __p2, 19); \ +}) +#define vstl1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_f64(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#define vstl1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + 
uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) 
int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbcaxq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vbcaxq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai 
__attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_veor3q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_veor3q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__p0, 
(int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_veor3q_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_veor3q_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) 
__builtin_neon_vrax1q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vrax1q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su1q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; 
__rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512su1q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vxarq_u64((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vxarq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + 
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.1a"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_lane_s32(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \ + int32_t __ret_787; \ + int32_t __s0_787 = __p0_787; \ + int32_t __s1_787 = __p1_787; \ + int32x2_t __s2_787 = __p2_787; \ + __ret_787 = vqrdmlahs_s32(__s0_787, __s1_787, vget_lane_s32(__s2_787, __p3_787)); \ + __ret_787; \ +}) +#else +#define vqrdmlahs_lane_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \ + int32_t __ret_788; \ + int32_t __s0_788 = __p0_788; \ + int32_t __s1_788 = __p1_788; \ + int32x2_t __s2_788 = __p2_788; \ + int32x2_t __rev2_788; __rev2_788 = __builtin_shufflevector(__s2_788, __s2_788, 1, 0); \ + __ret_788 = vqrdmlahs_s32(__s0_788, __s1_788, __noswap_vget_lane_s32(__rev2_788, __p3_788)); \ + __ret_788; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_lane_s16(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \ + int16_t __ret_789; \ + int16_t __s0_789 = __p0_789; \ + int16_t __s1_789 = __p1_789; \ + int16x4_t __s2_789 = __p2_789; \ + __ret_789 = vqrdmlahh_s16(__s0_789, __s1_789, vget_lane_s16(__s2_789, __p3_789)); \ + __ret_789; \ +}) +#else +#define vqrdmlahh_lane_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \ + int16_t __ret_790; \ + int16_t __s0_790 = __p0_790; \ + int16_t __s1_790 = __p1_790; \ + int16x4_t __s2_790 = __p2_790; 
\ + int16x4_t __rev2_790; __rev2_790 = __builtin_shufflevector(__s2_790, __s2_790, 3, 2, 1, 0); \ + __ret_790 = vqrdmlahh_s16(__s0_790, __s1_790, __noswap_vget_lane_s16(__rev2_790, __p3_790)); \ + __ret_790; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_laneq_s32(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \ + int32_t __ret_791; \ + int32_t __s0_791 = __p0_791; \ + int32_t __s1_791 = __p1_791; \ + int32x4_t __s2_791 = __p2_791; \ + __ret_791 = vqrdmlahs_s32(__s0_791, __s1_791, vgetq_lane_s32(__s2_791, __p3_791)); \ + __ret_791; \ +}) +#else +#define vqrdmlahs_laneq_s32(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \ + int32_t __ret_792; \ + int32_t __s0_792 = __p0_792; \ + int32_t __s1_792 = __p1_792; \ + int32x4_t __s2_792 = __p2_792; \ + int32x4_t __rev2_792; __rev2_792 = __builtin_shufflevector(__s2_792, __s2_792, 3, 2, 1, 0); \ + __ret_792 = vqrdmlahs_s32(__s0_792, __s1_792, __noswap_vgetq_lane_s32(__rev2_792, __p3_792)); \ + __ret_792; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_laneq_s16(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \ + int16_t __ret_793; \ + int16_t __s0_793 = __p0_793; \ + int16_t __s1_793 = __p1_793; \ + int16x8_t __s2_793 = __p2_793; \ + __ret_793 = vqrdmlahh_s16(__s0_793, __s1_793, vgetq_lane_s16(__s2_793, __p3_793)); \ + __ret_793; \ +}) +#else +#define vqrdmlahh_laneq_s16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \ + int16_t __ret_794; \ + int16_t __s0_794 = __p0_794; \ + int16_t __s1_794 = __p1_794; \ + int16x8_t __s2_794 = __p2_794; \ + int16x8_t __rev2_794; __rev2_794 = __builtin_shufflevector(__s2_794, __s2_794, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_794 = vqrdmlahh_s16(__s0_794, __s1_794, __noswap_vgetq_lane_s16(__rev2_794, __p3_794)); \ + __ret_794; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s32(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \ + int32x4_t __ret_795; \ + int32x4_t __s0_795 = __p0_795; \ + int32x4_t __s1_795 = __p1_795; \ + int32x4_t __s2_795 = __p2_795; \ + __ret_795 = vqrdmlahq_s32(__s0_795, __s1_795, splatq_laneq_s32(__s2_795, __p3_795)); \ + __ret_795; \ +}) +#else +#define vqrdmlahq_laneq_s32(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \ + int32x4_t __ret_796; \ + int32x4_t __s0_796 = __p0_796; \ + int32x4_t __s1_796 = __p1_796; \ + int32x4_t __s2_796 = __p2_796; \ + int32x4_t __rev0_796; __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \ + int32x4_t __rev1_796; __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \ + int32x4_t __rev2_796; __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 3, 2, 1, 0); \ + __ret_796 = __noswap_vqrdmlahq_s32(__rev0_796, __rev1_796, __noswap_splatq_laneq_s32(__rev2_796, __p3_796)); \ + __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 3, 2, 1, 0); \ + __ret_796; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s16(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \ + int16x8_t __ret_797; \ + int16x8_t __s0_797 = __p0_797; \ + int16x8_t __s1_797 = __p1_797; \ + int16x8_t __s2_797 = __p2_797; \ + __ret_797 = vqrdmlahq_s16(__s0_797, __s1_797, splatq_laneq_s16(__s2_797, __p3_797)); \ + __ret_797; \ +}) +#else +#define vqrdmlahq_laneq_s16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \ + int16x8_t __ret_798; \ + int16x8_t __s0_798 = __p0_798; \ + int16x8_t __s1_798 = __p1_798; \ + int16x8_t __s2_798 = __p2_798; \ + int16x8_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 7, 6, 
5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_798; __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_798 = __noswap_vqrdmlahq_s16(__rev0_798, __rev1_798, __noswap_splatq_laneq_s16(__rev2_798, __p3_798)); \ + __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_798; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s32(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \ + int32x2_t __ret_799; \ + int32x2_t __s0_799 = __p0_799; \ + int32x2_t __s1_799 = __p1_799; \ + int32x4_t __s2_799 = __p2_799; \ + __ret_799 = vqrdmlah_s32(__s0_799, __s1_799, splat_laneq_s32(__s2_799, __p3_799)); \ + __ret_799; \ +}) +#else +#define vqrdmlah_laneq_s32(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \ + int32x2_t __ret_800; \ + int32x2_t __s0_800 = __p0_800; \ + int32x2_t __s1_800 = __p1_800; \ + int32x4_t __s2_800 = __p2_800; \ + int32x2_t __rev0_800; __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \ + int32x2_t __rev1_800; __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \ + int32x4_t __rev2_800; __rev2_800 = __builtin_shufflevector(__s2_800, __s2_800, 3, 2, 1, 0); \ + __ret_800 = __noswap_vqrdmlah_s32(__rev0_800, __rev1_800, __noswap_splat_laneq_s32(__rev2_800, __p3_800)); \ + __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 1, 0); \ + __ret_800; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s16(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \ + int16x4_t __ret_801; \ + int16x4_t __s0_801 = __p0_801; \ + int16x4_t __s1_801 = __p1_801; \ + int16x8_t __s2_801 = __p2_801; \ + __ret_801 = vqrdmlah_s16(__s0_801, __s1_801, splat_laneq_s16(__s2_801, __p3_801)); \ + __ret_801; \ +}) +#else +#define vqrdmlah_laneq_s16(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \ + int16x4_t __ret_802; \ + int16x4_t __s0_802 = __p0_802; \ + int16x4_t __s1_802 = __p1_802; \ + int16x8_t __s2_802 = __p2_802; \ + int16x4_t __rev0_802; __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \ + int16x4_t __rev1_802; __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 3, 2, 1, 0); \ + int16x8_t __rev2_802; __rev2_802 = __builtin_shufflevector(__s2_802, __s2_802, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_802 = __noswap_vqrdmlah_s16(__rev0_802, __rev1_802, __noswap_splat_laneq_s16(__rev2_802, __p3_802)); \ + __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 3, 2, 1, 0); \ + __ret_802; \ +}) +#endif + +__ai __attribute__((target("v8.1a"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_lane_s32(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \ + int32_t __ret_803; \ + int32_t __s0_803 = __p0_803; \ + int32_t __s1_803 = __p1_803; \ + int32x2_t __s2_803 = __p2_803; \ + __ret_803 = vqrdmlshs_s32(__s0_803, __s1_803, vget_lane_s32(__s2_803, __p3_803)); \ + __ret_803; \ +}) +#else +#define vqrdmlshs_lane_s32(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \ + int32_t __ret_804; \ + int32_t __s0_804 = __p0_804; \ + int32_t __s1_804 = __p1_804; \ 
+ int32x2_t __s2_804 = __p2_804; \ + int32x2_t __rev2_804; __rev2_804 = __builtin_shufflevector(__s2_804, __s2_804, 1, 0); \ + __ret_804 = vqrdmlshs_s32(__s0_804, __s1_804, __noswap_vget_lane_s32(__rev2_804, __p3_804)); \ + __ret_804; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_lane_s16(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \ + int16_t __ret_805; \ + int16_t __s0_805 = __p0_805; \ + int16_t __s1_805 = __p1_805; \ + int16x4_t __s2_805 = __p2_805; \ + __ret_805 = vqrdmlshh_s16(__s0_805, __s1_805, vget_lane_s16(__s2_805, __p3_805)); \ + __ret_805; \ +}) +#else +#define vqrdmlshh_lane_s16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \ + int16_t __ret_806; \ + int16_t __s0_806 = __p0_806; \ + int16_t __s1_806 = __p1_806; \ + int16x4_t __s2_806 = __p2_806; \ + int16x4_t __rev2_806; __rev2_806 = __builtin_shufflevector(__s2_806, __s2_806, 3, 2, 1, 0); \ + __ret_806 = vqrdmlshh_s16(__s0_806, __s1_806, __noswap_vget_lane_s16(__rev2_806, __p3_806)); \ + __ret_806; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_laneq_s32(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \ + int32_t __ret_807; \ + int32_t __s0_807 = __p0_807; \ + int32_t __s1_807 = __p1_807; \ + int32x4_t __s2_807 = __p2_807; \ + __ret_807 = vqrdmlshs_s32(__s0_807, __s1_807, vgetq_lane_s32(__s2_807, __p3_807)); \ + __ret_807; \ +}) +#else +#define vqrdmlshs_laneq_s32(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \ + int32_t __ret_808; \ + int32_t __s0_808 = __p0_808; \ + int32_t __s1_808 = __p1_808; \ + int32x4_t __s2_808 = __p2_808; \ + int32x4_t __rev2_808; __rev2_808 = __builtin_shufflevector(__s2_808, __s2_808, 3, 2, 1, 0); \ + __ret_808 = vqrdmlshs_s32(__s0_808, __s1_808, __noswap_vgetq_lane_s32(__rev2_808, __p3_808)); \ + __ret_808; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_laneq_s16(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \ + int16_t __ret_809; \ + int16_t __s0_809 = __p0_809; \ + int16_t __s1_809 = __p1_809; \ + int16x8_t __s2_809 = __p2_809; \ + __ret_809 = vqrdmlshh_s16(__s0_809, __s1_809, vgetq_lane_s16(__s2_809, __p3_809)); \ + __ret_809; \ +}) +#else +#define vqrdmlshh_laneq_s16(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \ + int16_t __ret_810; \ + int16_t __s0_810 = __p0_810; \ + int16_t __s1_810 = __p1_810; \ + int16x8_t __s2_810 = __p2_810; \ + int16x8_t __rev2_810; __rev2_810 = __builtin_shufflevector(__s2_810, __s2_810, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_810 = vqrdmlshh_s16(__s0_810, __s1_810, __noswap_vgetq_lane_s16(__rev2_810, __p3_810)); \ + __ret_810; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s32(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \ + int32x4_t __ret_811; \ + int32x4_t __s0_811 = __p0_811; \ + int32x4_t __s1_811 = __p1_811; \ + int32x4_t __s2_811 = __p2_811; \ + __ret_811 = vqrdmlshq_s32(__s0_811, __s1_811, splatq_laneq_s32(__s2_811, __p3_811)); \ + __ret_811; \ +}) +#else +#define vqrdmlshq_laneq_s32(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \ + int32x4_t __ret_812; \ + int32x4_t __s0_812 = __p0_812; \ + int32x4_t __s1_812 = __p1_812; \ + int32x4_t __s2_812 = __p2_812; \ + int32x4_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \ + int32x4_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 3, 2, 1, 0); \ + int32x4_t __rev2_812; __rev2_812 = __builtin_shufflevector(__s2_812, __s2_812, 3, 2, 1, 0); \ + __ret_812 = __noswap_vqrdmlshq_s32(__rev0_812, __rev1_812, 
__noswap_splatq_laneq_s32(__rev2_812, __p3_812)); \ + __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 3, 2, 1, 0); \ + __ret_812; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s16(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \ + int16x8_t __ret_813; \ + int16x8_t __s0_813 = __p0_813; \ + int16x8_t __s1_813 = __p1_813; \ + int16x8_t __s2_813 = __p2_813; \ + __ret_813 = vqrdmlshq_s16(__s0_813, __s1_813, splatq_laneq_s16(__s2_813, __p3_813)); \ + __ret_813; \ +}) +#else +#define vqrdmlshq_laneq_s16(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \ + int16x8_t __ret_814; \ + int16x8_t __s0_814 = __p0_814; \ + int16x8_t __s1_814 = __p1_814; \ + int16x8_t __s2_814 = __p2_814; \ + int16x8_t __rev0_814; __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_814; __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_814; __rev2_814 = __builtin_shufflevector(__s2_814, __s2_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_814 = __noswap_vqrdmlshq_s16(__rev0_814, __rev1_814, __noswap_splatq_laneq_s16(__rev2_814, __p3_814)); \ + __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_814; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \ + int32x2_t __ret_815; \ + int32x2_t __s0_815 = __p0_815; \ + int32x2_t __s1_815 = __p1_815; \ + int32x4_t __s2_815 = __p2_815; \ + __ret_815 = vqrdmlsh_s32(__s0_815, __s1_815, splat_laneq_s32(__s2_815, __p3_815)); \ + __ret_815; \ +}) +#else +#define vqrdmlsh_laneq_s32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \ + int32x2_t __ret_816; \ + int32x2_t __s0_816 = __p0_816; \ + int32x2_t __s1_816 = __p1_816; \ + int32x4_t __s2_816 = __p2_816; \ + int32x2_t __rev0_816; __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \ + int32x2_t __rev1_816; __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \ + int32x4_t __rev2_816; __rev2_816 = __builtin_shufflevector(__s2_816, __s2_816, 3, 2, 1, 0); \ + __ret_816 = __noswap_vqrdmlsh_s32(__rev0_816, __rev1_816, __noswap_splat_laneq_s32(__rev2_816, __p3_816)); \ + __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 1, 0); \ + __ret_816; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s16(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \ + int16x4_t __ret_817; \ + int16x4_t __s0_817 = __p0_817; \ + int16x4_t __s1_817 = __p1_817; \ + int16x8_t __s2_817 = __p2_817; \ + __ret_817 = vqrdmlsh_s16(__s0_817, __s1_817, splat_laneq_s16(__s2_817, __p3_817)); \ + __ret_817; \ +}) +#else +#define vqrdmlsh_laneq_s16(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \ + int16x4_t __ret_818; \ + int16x4_t __s0_818 = __p0_818; \ + int16x4_t __s1_818 = __p1_818; \ + int16x8_t __s2_818 = __p2_818; \ + int16x4_t __rev0_818; __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 3, 2, 1, 0); \ + int16x4_t __rev1_818; __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 3, 2, 1, 0); \ + int16x8_t __rev2_818; __rev2_818 = __builtin_shufflevector(__s2_818, __s2_818, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_818 = __noswap_vqrdmlsh_s16(__rev0_818, __rev1_818, __noswap_splat_laneq_s16(__rev2_818, __p3_818)); \ + __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \ + __ret_818; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, 
float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_lane_f64(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \ + float64x1_t __ret_819; \ + float64x1_t __s0_819 = __p0_819; \ + float64x1_t __s1_819 = __p1_819; \ + float64x1_t __s2_819 = __p2_819; \ +float64x1_t __reint_819 = __s2_819; \ +uint64x2_t __reint1_819 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819), vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819)}; \ + __ret_819 = vcmla_f64(__s0_819, __s1_819, *(float64x1_t *) &__reint1_819); \ + __ret_819; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f64(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \ + float64x2_t __ret_820; \ + float64x2_t __s0_820 = __p0_820; \ + float64x2_t __s1_820 = __p1_820; \ + float64x1_t __s2_820 = __p2_820; \ +float64x1_t __reint_820 = __s2_820; \ +uint64x2_t 
__reint1_820 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820), vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820)}; \ + __ret_820 = vcmlaq_f64(__s0_820, __s1_820, *(float64x2_t *) &__reint1_820); \ + __ret_820; \ +}) +#else +#define vcmlaq_lane_f64(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \ + float64x2_t __ret_821; \ + float64x2_t __s0_821 = __p0_821; \ + float64x2_t __s1_821 = __p1_821; \ + float64x1_t __s2_821 = __p2_821; \ + float64x2_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, 1, 0); \ + float64x2_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, 1, 0); \ +float64x1_t __reint_821 = __s2_821; \ +uint64x2_t __reint1_821 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821)}; \ + __ret_821 = __noswap_vcmlaq_f64(__rev0_821, __rev1_821, *(float64x2_t *) &__reint1_821); \ + __ret_821 = __builtin_shufflevector(__ret_821, __ret_821, 1, 0); \ + __ret_821; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f64(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \ + float64x1_t __ret_822; \ + float64x1_t __s0_822 = __p0_822; \ + float64x1_t __s1_822 = __p1_822; \ + float64x2_t __s2_822 = __p2_822; \ +float64x2_t __reint_822 = __s2_822; \ +uint64x2_t __reint1_822 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822), vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822)}; \ + __ret_822 = vcmla_f64(__s0_822, __s1_822, *(float64x1_t *) &__reint1_822); \ + __ret_822; \ +}) +#else +#define vcmla_laneq_f64(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \ + float64x1_t __ret_823; \ + float64x1_t __s0_823 = __p0_823; \ + float64x1_t __s1_823 = __p1_823; \ + float64x2_t __s2_823 = __p2_823; \ + float64x2_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, 1, 0); \ +float64x2_t __reint_823 = __rev2_823; \ +uint64x2_t __reint1_823 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823)}; \ + __ret_823 = vcmla_f64(__s0_823, __s1_823, *(float64x1_t *) &__reint1_823); \ + __ret_823; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f64(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \ + float64x2_t __ret_824; \ + float64x2_t __s0_824 = __p0_824; \ + float64x2_t __s1_824 = __p1_824; \ + float64x2_t __s2_824 = __p2_824; \ +float64x2_t __reint_824 = __s2_824; \ +uint64x2_t __reint1_824 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824), vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824)}; \ + __ret_824 = vcmlaq_f64(__s0_824, __s1_824, *(float64x2_t *) &__reint1_824); \ + __ret_824; \ +}) +#else +#define vcmlaq_laneq_f64(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \ + float64x2_t __ret_825; \ + float64x2_t __s0_825 = __p0_825; \ + float64x2_t __s1_825 = __p1_825; \ + float64x2_t __s2_825 = __p2_825; \ + float64x2_t __rev0_825; __rev0_825 = __builtin_shufflevector(__s0_825, __s0_825, 1, 0); \ + float64x2_t __rev1_825; __rev1_825 = __builtin_shufflevector(__s1_825, __s1_825, 1, 0); \ + float64x2_t __rev2_825; __rev2_825 = __builtin_shufflevector(__s2_825, __s2_825, 1, 0); \ +float64x2_t __reint_825 = __rev2_825; \ +uint64x2_t __reint1_825 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825)}; \ + __ret_825 = __noswap_vcmlaq_f64(__rev0_825, 
__rev1_825, *(float64x2_t *) &__reint1_825); \ + __ret_825 = __builtin_shufflevector(__ret_825, __ret_825, 1, 0); \ + __ret_825; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot180_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot180_lane_f64(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \ + float64x1_t __ret_826; \ + float64x1_t __s0_826 = __p0_826; \ + float64x1_t __s1_826 = __p1_826; \ + float64x1_t __s2_826 = __p2_826; \ +float64x1_t __reint_826 = __s2_826; \ +uint64x2_t __reint1_826 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826), vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826)}; \ + __ret_826 = vcmla_rot180_f64(__s0_826, __s1_826, *(float64x1_t *) &__reint1_826); \ + __ret_826; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f64(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \ + float64x2_t __ret_827; \ + float64x2_t __s0_827 = __p0_827; \ + float64x2_t __s1_827 = __p1_827; \ + float64x1_t __s2_827 = __p2_827; \ +float64x1_t __reint_827 = __s2_827; \ +uint64x2_t __reint1_827 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827), vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827)}; \ + __ret_827 = vcmlaq_rot180_f64(__s0_827, __s1_827, *(float64x2_t *) &__reint1_827); \ + __ret_827; \ +}) +#else +#define vcmlaq_rot180_lane_f64(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ + float64x2_t __ret_828; \ + float64x2_t __s0_828 = __p0_828; \ + float64x2_t __s1_828 = __p1_828; \ + float64x1_t __s2_828 = __p2_828; \ + float64x2_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 1, 0); \ + float64x2_t __rev1_828; __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 1, 0); \ +float64x1_t __reint_828 = __s2_828; \ +uint64x2_t __reint1_828 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828)}; \ + __ret_828 = __noswap_vcmlaq_rot180_f64(__rev0_828, __rev1_828, *(float64x2_t *) &__reint1_828); \ + __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \ + __ret_828; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vcmla_rot180_laneq_f64(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ + float64x1_t __ret_829; \ + float64x1_t __s0_829 = __p0_829; \ + float64x1_t __s1_829 = __p1_829; \ + float64x2_t __s2_829 = __p2_829; \ +float64x2_t __reint_829 = __s2_829; \ +uint64x2_t __reint1_829 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829), vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829)}; \ + __ret_829 = vcmla_rot180_f64(__s0_829, __s1_829, *(float64x1_t *) &__reint1_829); \ + __ret_829; \ +}) +#else +#define vcmla_rot180_laneq_f64(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ + float64x1_t __ret_830; \ + float64x1_t __s0_830 = __p0_830; \ + float64x1_t __s1_830 = __p1_830; \ + float64x2_t __s2_830 = __p2_830; \ + float64x2_t __rev2_830; __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 1, 0); \ +float64x2_t __reint_830 = __rev2_830; \ +uint64x2_t __reint1_830 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830)}; \ + __ret_830 = vcmla_rot180_f64(__s0_830, __s1_830, *(float64x1_t *) &__reint1_830); \ + __ret_830; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f64(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ + float64x2_t __ret_831; \ + float64x2_t __s0_831 = __p0_831; \ + float64x2_t __s1_831 = __p1_831; \ + float64x2_t __s2_831 = __p2_831; \ +float64x2_t __reint_831 = __s2_831; \ +uint64x2_t __reint1_831 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831), vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831)}; \ + __ret_831 = vcmlaq_rot180_f64(__s0_831, __s1_831, *(float64x2_t *) &__reint1_831); \ + __ret_831; \ +}) +#else +#define vcmlaq_rot180_laneq_f64(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ + float64x2_t __ret_832; \ + float64x2_t __s0_832 = __p0_832; \ + float64x2_t __s1_832 = __p1_832; \ + float64x2_t __s2_832 = __p2_832; \ + float64x2_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 1, 0); \ + float64x2_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 1, 0); \ + float64x2_t __rev2_832; __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 1, 0); \ +float64x2_t __reint_832 = __rev2_832; \ +uint64x2_t __reint1_832 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832)}; \ + __ret_832 = __noswap_vcmlaq_rot180_f64(__rev0_832, __rev1_832, *(float64x2_t *) &__reint1_832); \ + __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 1, 0); \ + __ret_832; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai 
__attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot270_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot270_lane_f64(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ + float64x1_t __ret_833; \ + float64x1_t __s0_833 = __p0_833; \ + float64x1_t __s1_833 = __p1_833; \ + float64x1_t __s2_833 = __p2_833; \ +float64x1_t __reint_833 = __s2_833; \ +uint64x2_t __reint1_833 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833), vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833)}; \ + __ret_833 = vcmla_rot270_f64(__s0_833, __s1_833, *(float64x1_t *) &__reint1_833); \ + __ret_833; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f64(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ + float64x2_t __ret_834; \ + float64x2_t __s0_834 = __p0_834; \ + float64x2_t __s1_834 = __p1_834; \ + float64x1_t __s2_834 = __p2_834; \ +float64x1_t __reint_834 = __s2_834; \ +uint64x2_t __reint1_834 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834), vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834)}; \ + __ret_834 = vcmlaq_rot270_f64(__s0_834, __s1_834, *(float64x2_t *) &__reint1_834); \ + __ret_834; \ +}) +#else +#define vcmlaq_rot270_lane_f64(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \ + float64x2_t __ret_835; \ + float64x2_t __s0_835 = __p0_835; \ + float64x2_t __s1_835 = __p1_835; \ + float64x1_t __s2_835 = __p2_835; \ + float64x2_t __rev0_835; __rev0_835 = __builtin_shufflevector(__s0_835, __s0_835, 1, 0); \ + float64x2_t __rev1_835; __rev1_835 = __builtin_shufflevector(__s1_835, __s1_835, 1, 0); \ +float64x1_t __reint_835 = __s2_835; \ +uint64x2_t __reint1_835 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835)}; \ + __ret_835 = __noswap_vcmlaq_rot270_f64(__rev0_835, __rev1_835, *(float64x2_t *) &__reint1_835); \ + __ret_835 = __builtin_shufflevector(__ret_835, __ret_835, 1, 0); \ + __ret_835; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f64(__p0_836, __p1_836, __p2_836, __p3_836) __extension__ ({ \ + float64x1_t __ret_836; \ + float64x1_t __s0_836 = __p0_836; \ + float64x1_t __s1_836 = __p1_836; \ + float64x2_t __s2_836 = __p2_836; \ +float64x2_t __reint_836 = __s2_836; \ +uint64x2_t __reint1_836 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836), vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836)}; \ + __ret_836 = vcmla_rot270_f64(__s0_836, __s1_836, *(float64x1_t *) &__reint1_836); \ + __ret_836; \ +}) +#else +#define vcmla_rot270_laneq_f64(__p0_837, __p1_837, __p2_837, __p3_837) __extension__ ({ \ + float64x1_t __ret_837; \ + float64x1_t __s0_837 = __p0_837; \ + float64x1_t __s1_837 = __p1_837; \ + float64x2_t __s2_837 = __p2_837; \ + float64x2_t __rev2_837; __rev2_837 = __builtin_shufflevector(__s2_837, __s2_837, 1, 0); \ +float64x2_t __reint_837 = __rev2_837; \ +uint64x2_t __reint1_837 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_837, __p3_837), __noswap_vgetq_lane_u64(*(uint64x2_t *) 
&__reint_837, __p3_837)}; \ + __ret_837 = vcmla_rot270_f64(__s0_837, __s1_837, *(float64x1_t *) &__reint1_837); \ + __ret_837; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f64(__p0_838, __p1_838, __p2_838, __p3_838) __extension__ ({ \ + float64x2_t __ret_838; \ + float64x2_t __s0_838 = __p0_838; \ + float64x2_t __s1_838 = __p1_838; \ + float64x2_t __s2_838 = __p2_838; \ +float64x2_t __reint_838 = __s2_838; \ +uint64x2_t __reint1_838 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838), vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838)}; \ + __ret_838 = vcmlaq_rot270_f64(__s0_838, __s1_838, *(float64x2_t *) &__reint1_838); \ + __ret_838; \ +}) +#else +#define vcmlaq_rot270_laneq_f64(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \ + float64x2_t __ret_839; \ + float64x2_t __s0_839 = __p0_839; \ + float64x2_t __s1_839 = __p1_839; \ + float64x2_t __s2_839 = __p2_839; \ + float64x2_t __rev0_839; __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 1, 0); \ + float64x2_t __rev1_839; __rev1_839 = __builtin_shufflevector(__s1_839, __s1_839, 1, 0); \ + float64x2_t __rev2_839; __rev2_839 = __builtin_shufflevector(__s2_839, __s2_839, 1, 0); \ +float64x2_t __reint_839 = __rev2_839; \ +uint64x2_t __reint1_839 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839)}; \ + __ret_839 = __noswap_vcmlaq_rot270_f64(__rev0_839, __rev1_839, *(float64x2_t *) &__reint1_839); \ + __ret_839 = __builtin_shufflevector(__ret_839, __ret_839, 1, 0); \ + __ret_839; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot90_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot90_lane_f64(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \ + float64x1_t __ret_840; \ + float64x1_t __s0_840 = __p0_840; \ + float64x1_t __s1_840 = __p1_840; \ + float64x1_t __s2_840 = __p2_840; \ +float64x1_t __reint_840 = __s2_840; \ +uint64x2_t __reint1_840 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840), vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840)}; \ + __ret_840 = vcmla_rot90_f64(__s0_840, __s1_840, *(float64x1_t *) 
&__reint1_840); \ + __ret_840; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f64(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ + float64x2_t __ret_841; \ + float64x2_t __s0_841 = __p0_841; \ + float64x2_t __s1_841 = __p1_841; \ + float64x1_t __s2_841 = __p2_841; \ +float64x1_t __reint_841 = __s2_841; \ +uint64x2_t __reint1_841 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841), vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841)}; \ + __ret_841 = vcmlaq_rot90_f64(__s0_841, __s1_841, *(float64x2_t *) &__reint1_841); \ + __ret_841; \ +}) +#else +#define vcmlaq_rot90_lane_f64(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ + float64x2_t __ret_842; \ + float64x2_t __s0_842 = __p0_842; \ + float64x2_t __s1_842 = __p1_842; \ + float64x1_t __s2_842 = __p2_842; \ + float64x2_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \ + float64x2_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \ +float64x1_t __reint_842 = __s2_842; \ +uint64x2_t __reint1_842 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842)}; \ + __ret_842 = __noswap_vcmlaq_rot90_f64(__rev0_842, __rev1_842, *(float64x2_t *) &__reint1_842); \ + __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \ + __ret_842; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f64(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ + float64x1_t __ret_843; \ + float64x1_t __s0_843 = __p0_843; \ + float64x1_t __s1_843 = __p1_843; \ + float64x2_t __s2_843 = __p2_843; \ +float64x2_t __reint_843 = __s2_843; \ +uint64x2_t __reint1_843 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843), vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843)}; \ + __ret_843 = vcmla_rot90_f64(__s0_843, __s1_843, *(float64x1_t *) &__reint1_843); \ + __ret_843; \ +}) +#else +#define vcmla_rot90_laneq_f64(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ + float64x1_t __ret_844; \ + float64x1_t __s0_844 = __p0_844; \ + float64x1_t __s1_844 = __p1_844; \ + float64x2_t __s2_844 = __p2_844; \ + float64x2_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 1, 0); \ +float64x2_t __reint_844 = __rev2_844; \ +uint64x2_t __reint1_844 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, __p3_844), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, __p3_844)}; \ + __ret_844 = vcmla_rot90_f64(__s0_844, __s1_844, *(float64x1_t *) &__reint1_844); \ + __ret_844; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f64(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ + float64x2_t __ret_845; \ + float64x2_t __s0_845 = __p0_845; \ + float64x2_t __s1_845 = __p1_845; \ + float64x2_t __s2_845 = __p2_845; \ +float64x2_t __reint_845 = __s2_845; \ +uint64x2_t __reint1_845 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845), vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845)}; \ + __ret_845 = vcmlaq_rot90_f64(__s0_845, __s1_845, *(float64x2_t *) &__reint1_845); \ + __ret_845; \ +}) +#else +#define vcmlaq_rot90_laneq_f64(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ + float64x2_t __ret_846; \ + float64x2_t __s0_846 = __p0_846; \ + float64x2_t __s1_846 = __p1_846; \ + float64x2_t __s2_846 = __p2_846; \ + float64x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ + float64x2_t __rev1_846; __rev1_846 = 
__builtin_shufflevector(__s1_846, __s1_846, 1, 0); \ + float64x2_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \ +float64x2_t __reint_846 = __rev2_846; \ +uint64x2_t __reint1_846 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846)}; \ + __ret_846 = __noswap_vcmlaq_rot90_f64(__rev0_846, __rev1_846, *(float64x2_t *) &__reint1_846); \ + __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ + __ret_846; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a"))) float64x1_t vrnd32x_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd32x_f64((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__rev0, 9); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a"))) float64x1_t vrnd32z_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd32z_f64((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a"))) float64x1_t vrnd64x_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd64x_f64((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__p0, 9); + return __ret; +} 
+#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a"))) float64x1_t vrnd64z_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd64z_f64((int8x8_t)__p0, 10); + return __ret; +} +#endif +#if defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrnd_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrnda_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndi_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndm_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndn_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndp_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndx_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); + return __ret; +} +#endif +#if defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + 
__ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 + vabdq_u8(__p1, __p2); + return __ret; +} +#else +__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdq_u32(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdq_u16(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + vabdq_s8(__p1, __p2); + return __ret; +} +#else +__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; 
+ __ret = __p0 + vabdq_s32(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdq_s16(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + vabd_u8(__p1, __p2); + return __ret; +} +#else +__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + vabd_u32(__p1, __p2); + return __ret; +} +#else +__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + vabd_u16(__p1, __p2); + return __ret; +} +#else +__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + vabd_s8(__p1, __p2); + return __ret; +} +#else +__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + vabd_s32(__p1, __p2); + return __ret; +} +#else +__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + vabd_s16(__p1, __p2); + return __ret; +} +#else +__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); + return __ret; +} +#else +__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); + return __ret; +} +#else +__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); + return __ret; +} 
+#else +__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); + return __ret; +} +#else +__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); + return __ret; +} +#else +__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); + return __ret; +} +#else +__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) + vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_u8(__rev0) + 
__noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) + vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) + vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) + vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) + vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = vmovl_s16(__p0) + vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f16(__p0_847, __p1_847) __extension__ ({ \ + float16_t __ret_847; \ + float16x4_t __s0_847 = __p0_847; \ +float16x4_t __reint_847 = __s0_847; \ +int16_t __reint1_847 = vget_lane_s16(*(int16x4_t *) &__reint_847, __p1_847); \ + __ret_847 = *(float16_t *) &__reint1_847; \ + __ret_847; \ +}) +#else +#define vget_lane_f16(__p0_848, __p1_848) __extension__ ({ \ + float16_t __ret_848; \ + float16x4_t __s0_848 = __p0_848; \ + float16x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \ +float16x4_t __reint_848 = __rev0_848; \ +int16_t __reint1_848 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_848, __p1_848); \ + __ret_848 = *(float16_t *) &__reint1_848; \ + __ret_848; \ +}) +#define __noswap_vget_lane_f16(__p0_849, __p1_849) __extension__ ({ \ + float16_t __ret_849; \ + 
float16x4_t __s0_849 = __p0_849; \ +float16x4_t __reint_849 = __s0_849; \ +int16_t __reint1_849 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_849, __p1_849); \ + __ret_849 = *(float16_t *) &__reint1_849; \ + __ret_849; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f16(__p0_850, __p1_850) __extension__ ({ \ + float16_t __ret_850; \ + float16x8_t __s0_850 = __p0_850; \ +float16x8_t __reint_850 = __s0_850; \ +int16_t __reint1_850 = vgetq_lane_s16(*(int16x8_t *) &__reint_850, __p1_850); \ + __ret_850 = *(float16_t *) &__reint1_850; \ + __ret_850; \ +}) +#else +#define vgetq_lane_f16(__p0_851, __p1_851) __extension__ ({ \ + float16_t __ret_851; \ + float16x8_t __s0_851 = __p0_851; \ + float16x8_t __rev0_851; __rev0_851 = __builtin_shufflevector(__s0_851, __s0_851, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_851 = __rev0_851; \ +int16_t __reint1_851 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_851, __p1_851); \ + __ret_851 = *(float16_t *) &__reint1_851; \ + __ret_851; \ +}) +#define __noswap_vgetq_lane_f16(__p0_852, __p1_852) __extension__ ({ \ + float16_t __ret_852; \ + float16x8_t __s0_852 = __p0_852; \ +float16x8_t __reint_852 = __s0_852; \ +int16_t __reint1_852 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_852, __p1_852); \ + __ret_852 = *(float16_t *) &__reint1_852; \ + __ret_852; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \ + uint64x2_t __ret_853; \ + uint64x2_t __s0_853 = __p0_853; \ + uint32x2_t __s1_853 = __p1_853; \ + uint32x2_t __s2_853 = __p2_853; \ + __ret_853 = __s0_853 + vmull_u32(__s1_853, splat_lane_u32(__s2_853, __p3_853)); \ + __ret_853; \ +}) +#else +#define vmlal_lane_u32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ + uint64x2_t __ret_854; \ + uint64x2_t __s0_854 = __p0_854; \ + uint32x2_t __s1_854 = __p1_854; \ + uint32x2_t __s2_854 = __p2_854; \ + uint64x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ + uint32x2_t 
__rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \ + uint32x2_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \ + __ret_854 = __rev0_854 + __noswap_vmull_u32(__rev1_854, __noswap_splat_lane_u32(__rev2_854, __p3_854)); \ + __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ + __ret_854; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \ + uint32x4_t __ret_855; \ + uint32x4_t __s0_855 = __p0_855; \ + uint16x4_t __s1_855 = __p1_855; \ + uint16x4_t __s2_855 = __p2_855; \ + __ret_855 = __s0_855 + vmull_u16(__s1_855, splat_lane_u16(__s2_855, __p3_855)); \ + __ret_855; \ +}) +#else +#define vmlal_lane_u16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ + uint32x4_t __ret_856; \ + uint32x4_t __s0_856 = __p0_856; \ + uint16x4_t __s1_856 = __p1_856; \ + uint16x4_t __s2_856 = __p2_856; \ + uint32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ + uint16x4_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \ + uint16x4_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \ + __ret_856 = __rev0_856 + __noswap_vmull_u16(__rev1_856, __noswap_splat_lane_u16(__rev2_856, __p3_856)); \ + __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ + __ret_856; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ + int64x2_t __ret_857; \ + int64x2_t __s0_857 = __p0_857; \ + int32x2_t __s1_857 = __p1_857; \ + int32x2_t __s2_857 = __p2_857; \ + __ret_857 = __s0_857 + vmull_s32(__s1_857, splat_lane_s32(__s2_857, __p3_857)); \ + __ret_857; \ +}) +#else +#define vmlal_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ + int64x2_t __ret_858; \ + int64x2_t __s0_858 = __p0_858; \ + int32x2_t __s1_858 = __p1_858; \ + int32x2_t __s2_858 = __p2_858; \ + int64x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ + int32x2_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \ + int32x2_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \ + __ret_858 = __rev0_858 + __noswap_vmull_s32(__rev1_858, __noswap_splat_lane_s32(__rev2_858, __p3_858)); \ + __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ + __ret_858; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \ + int32x4_t __ret_859; \ + int32x4_t __s0_859 = __p0_859; \ + int16x4_t __s1_859 = __p1_859; \ + int16x4_t __s2_859 = __p2_859; \ + __ret_859 = __s0_859 + vmull_s16(__s1_859, splat_lane_s16(__s2_859, __p3_859)); \ + __ret_859; \ +}) +#else +#define vmlal_lane_s16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \ + int32x4_t __ret_860; \ + int32x4_t __s0_860 = __p0_860; \ + int16x4_t __s1_860 = __p1_860; \ + int16x4_t __s2_860 = __p2_860; \ + int32x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ + int16x4_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \ + int16x4_t __rev2_860; __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \ + __ret_860 = __rev0_860 + __noswap_vmull_s16(__rev1_860, __noswap_splat_lane_s16(__rev2_860, __p3_860)); \ + __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \ + __ret_860; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vmlsl_u8(uint16x8_t 
__p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \ + uint64x2_t __ret_861; \ + uint64x2_t __s0_861 = __p0_861; \ + uint32x2_t __s1_861 = __p1_861; \ + uint32x2_t __s2_861 = __p2_861; \ + __ret_861 = __s0_861 - vmull_u32(__s1_861, splat_lane_u32(__s2_861, __p3_861)); \ + __ret_861; \ +}) +#else +#define vmlsl_lane_u32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \ + uint64x2_t __ret_862; \ + uint64x2_t __s0_862 = __p0_862; \ + uint32x2_t __s1_862 = __p1_862; \ + uint32x2_t __s2_862 = __p2_862; \ + uint64x2_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \ + uint32x2_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \ + uint32x2_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \ + __ret_862 = __rev0_862 - __noswap_vmull_u32(__rev1_862, __noswap_splat_lane_u32(__rev2_862, __p3_862)); \ + __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \ + __ret_862; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \ + uint32x4_t __ret_863; \ + uint32x4_t __s0_863 = __p0_863; \ + uint16x4_t __s1_863 = __p1_863; \ + uint16x4_t __s2_863 = __p2_863; \ + __ret_863 = __s0_863 - vmull_u16(__s1_863, splat_lane_u16(__s2_863, __p3_863)); \ + __ret_863; \ +}) +#else +#define vmlsl_lane_u16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \ + uint32x4_t __ret_864; \ + uint32x4_t __s0_864 = __p0_864; \ + uint16x4_t __s1_864 = __p1_864; \ + uint16x4_t __s2_864 = __p2_864; \ + uint32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \ + uint16x4_t __rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \ + uint16x4_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \ + __ret_864 = __rev0_864 - __noswap_vmull_u16(__rev1_864, __noswap_splat_lane_u16(__rev2_864, __p3_864)); \ + __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \ + __ret_864; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ + int64x2_t __ret_865; \ + 
int64x2_t __s0_865 = __p0_865; \ + int32x2_t __s1_865 = __p1_865; \ + int32x2_t __s2_865 = __p2_865; \ + __ret_865 = __s0_865 - vmull_s32(__s1_865, splat_lane_s32(__s2_865, __p3_865)); \ + __ret_865; \ +}) +#else +#define vmlsl_lane_s32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ + int64x2_t __ret_866; \ + int64x2_t __s0_866 = __p0_866; \ + int32x2_t __s1_866 = __p1_866; \ + int32x2_t __s2_866 = __p2_866; \ + int64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ + int32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ + int32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ + __ret_866 = __rev0_866 - __noswap_vmull_s32(__rev1_866, __noswap_splat_lane_s32(__rev2_866, __p3_866)); \ + __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ + __ret_866; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ + int32x4_t __ret_867; \ + int32x4_t __s0_867 = __p0_867; \ + int16x4_t __s1_867 = __p1_867; \ + int16x4_t __s2_867 = __p2_867; \ + __ret_867 = __s0_867 - vmull_s16(__s1_867, splat_lane_s16(__s2_867, __p3_867)); \ + __ret_867; \ +}) +#else +#define vmlsl_lane_s16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ + int32x4_t __ret_868; \ + int32x4_t __s0_868 = __p0_868; \ + int16x4_t __s1_868 = __p1_868; \ + int16x4_t __s2_868 = __p2_868; \ + int32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ + int16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ + int16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ + __ret_868 = __rev0_868 - __noswap_vmull_s16(__rev1_868, __noswap_splat_lane_s16(__rev2_868, __p3_868)); \ + __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ + __ret_868; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 - 
__noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f16(__p0_869, __p1_869, __p2_869) __extension__ ({ \ + float16x4_t __ret_869; \ + float16_t __s0_869 = __p0_869; \ + float16x4_t __s1_869 = __p1_869; \ +float16_t __reint_869 = __s0_869; \ +float16x4_t __reint1_869 = __s1_869; \ +int16x4_t __reint2_869 = vset_lane_s16(*(int16_t *) &__reint_869, *(int16x4_t *) &__reint1_869, __p2_869); \ + __ret_869 = *(float16x4_t *) &__reint2_869; \ + __ret_869; \ +}) +#else +#define vset_lane_f16(__p0_870, __p1_870, __p2_870) __extension__ ({ \ + float16x4_t __ret_870; \ + float16_t __s0_870 = __p0_870; \ + float16x4_t __s1_870 = __p1_870; \ + float16x4_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 3, 2, 1, 0); \ +float16_t __reint_870 = __s0_870; \ +float16x4_t __reint1_870 = __rev1_870; \ +int16x4_t __reint2_870 = __noswap_vset_lane_s16(*(int16_t *) &__reint_870, *(int16x4_t *) &__reint1_870, __p2_870); \ + __ret_870 = *(float16x4_t *) &__reint2_870; \ + __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \ + __ret_870; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f16(__p0_871, __p1_871, __p2_871) __extension__ ({ \ + float16x8_t __ret_871; \ + float16_t __s0_871 = __p0_871; \ + float16x8_t __s1_871 = __p1_871; \ +float16_t __reint_871 = __s0_871; \ +float16x8_t __reint1_871 = __s1_871; \ +int16x8_t __reint2_871 = vsetq_lane_s16(*(int16_t *) &__reint_871, *(int16x8_t *) &__reint1_871, __p2_871); \ + __ret_871 = *(float16x8_t *) &__reint2_871; \ + __ret_871; \ +}) +#else +#define vsetq_lane_f16(__p0_872, __p1_872, __p2_872) __extension__ ({ \ + float16x8_t __ret_872; \ + float16_t __s0_872 = __p0_872; \ + float16x8_t __s1_872 = __p1_872; \ + float16x8_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 7, 6, 5, 4, 3, 2, 1, 0); 
\ +float16_t __reint_872 = __s0_872; \ +float16x8_t __reint1_872 = __rev1_872; \ +int16x8_t __reint2_872 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_872, *(int16x8_t *) &__reint1_872, __p2_872); \ + __ret_872 = *(float16x8_t *) &__reint2_872; \ + __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_872; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_lane_f32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ + float32x4_t __ret_873; \ + float32x4_t __s0_873 = __p0_873; \ + bfloat16x8_t __s1_873 = __p1_873; \ + bfloat16x4_t __s2_873 = __p2_873; \ + __ret_873 = vbfmlalbq_f32(__s0_873, __s1_873, (bfloat16x8_t) {vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873)}); \ + __ret_873; \ +}) +#else +#define vbfmlalbq_lane_f32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ + float32x4_t __ret_874; \ + float32x4_t __s0_874 = __p0_874; \ + bfloat16x8_t __s1_874 = __p1_874; \ + bfloat16x4_t __s2_874 = __p2_874; \ + float32x4_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \ + __ret_874 = __noswap_vbfmlalbq_f32(__rev0_874, __rev1_874, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874)}); \ + __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \ + __ret_874; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_laneq_f32(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ + float32x4_t __ret_875; \ + float32x4_t __s0_875 = __p0_875; \ + bfloat16x8_t __s1_875 = __p1_875; \ + bfloat16x8_t __s2_875 = __p2_875; \ + __ret_875 = vbfmlalbq_f32(__s0_875, __s1_875, (bfloat16x8_t) {vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875)}); \ + __ret_875; \ +}) +#else +#define vbfmlalbq_laneq_f32(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ + float32x4_t __ret_876; \ + float32x4_t __s0_876 = __p0_876; \ + bfloat16x8_t __s1_876 = __p1_876; \ + bfloat16x8_t __s2_876 = __p2_876; \ + float32x4_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_876 = __noswap_vbfmlalbq_f32(__rev0_876, __rev1_876, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, 
__p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876)}); \ + __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \ + __ret_876; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ + float32x4_t __ret_877; \ + float32x4_t __s0_877 = __p0_877; \ + bfloat16x8_t __s1_877 = __p1_877; \ + bfloat16x4_t __s2_877 = __p2_877; \ + __ret_877 = vbfmlaltq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \ + __ret_877; \ +}) +#else +#define vbfmlaltq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ + float32x4_t __ret_878; \ + float32x4_t __s0_878 = __p0_878; \ + bfloat16x8_t __s1_878 = __p1_878; \ + bfloat16x4_t __s2_878 = __p2_878; \ + float32x4_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \ + __ret_878 = __noswap_vbfmlaltq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \ + __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \ + __ret_878; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ + float32x4_t __ret_879; \ + float32x4_t __s0_879 = __p0_879; \ + bfloat16x8_t __s1_879 = __p1_879; \ + bfloat16x8_t __s2_879 = __p2_879; \ + __ret_879 = vbfmlaltq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \ + __ret_879; \ +}) +#else +#define vbfmlaltq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ + float32x4_t __ret_880; \ + float32x4_t __s0_880 = __p0_880; \ + bfloat16x8_t __s1_880 = __p1_880; \ + bfloat16x8_t __s2_880 = __p2_880; \ + float32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_880 = __noswap_vbfmlaltq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), 
__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \ + __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ + __ret_880; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_lane_s32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \ + int32x4_t __ret_881; \ + int32x4_t __s0_881 = __p0_881; \ + int8x16_t __s1_881 = __p1_881; \ + uint8x8_t __s2_881 = __p2_881; \ +uint8x8_t __reint_881 = __s2_881; \ + __ret_881 = vusdotq_s32(__s0_881, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_881, __p3_881)), __s1_881); \ + __ret_881; \ +}) +#else +#define vsudotq_lane_s32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \ + int32x4_t __ret_882; \ + int32x4_t __s0_882 = __p0_882; \ + int8x16_t __s1_882 = __p1_882; \ + uint8x8_t __s2_882 = __p2_882; \ + int32x4_t __rev0_882; __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \ + int8x16_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_882 = __rev2_882; \ + __ret_882 = __noswap_vusdotq_s32(__rev0_882, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_882, __p3_882)), __rev1_882); \ + __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ + __ret_882; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_lane_s32(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \ + int32x2_t __ret_883; \ + int32x2_t __s0_883 = __p0_883; \ + int8x8_t __s1_883 = __p1_883; \ + uint8x8_t __s2_883 = __p2_883; \ +uint8x8_t __reint_883 = __s2_883; \ + __ret_883 = vusdot_s32(__s0_883, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_883, __p3_883)), __s1_883); \ + __ret_883; \ +}) +#else +#define vsudot_lane_s32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \ + int32x2_t __ret_884; \ + int32x2_t __s0_884 = __p0_884; \ + int8x8_t __s1_884 = __p1_884; \ + uint8x8_t __s2_884 = __p2_884; \ + int32x2_t __rev0_884; __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \ + int8x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_884; __rev2_884 = 
__builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_884 = __rev2_884; \ + __ret_884 = __noswap_vusdot_s32(__rev0_884, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_884, __p3_884)), __rev1_884); \ + __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \ + __ret_884; \ +}) +#endif + +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = 
__noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = 
vmovl_high_s32(__p0) + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddw_high_s32(int64x2_t __p0, 
int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p64(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ + poly64x2_t __ret_885; \ + poly64x2_t __s0_885 = __p0_885; \ + poly64x1_t __s2_885 = __p2_885; \ + __ret_885 = vsetq_lane_p64(vget_lane_p64(__s2_885, __p3_885), __s0_885, __p1_885); \ + __ret_885; \ +}) +#else +#define vcopyq_lane_p64(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ + poly64x2_t __ret_886; \ + poly64x2_t __s0_886 = __p0_886; \ + poly64x1_t __s2_886 = __p2_886; \ + poly64x2_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 1, 0); \ + __ret_886 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_886, __p3_886), __rev0_886, __p1_886); \ + __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 1, 0); \ + __ret_886; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_f64(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ + float64x2_t __ret_887; \ + float64x2_t __s0_887 = __p0_887; \ + float64x1_t __s2_887 = __p2_887; \ + __ret_887 = vsetq_lane_f64(vget_lane_f64(__s2_887, __p3_887), __s0_887, __p1_887); \ + __ret_887; \ +}) +#else +#define vcopyq_lane_f64(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ + float64x2_t __ret_888; \ + float64x2_t __s0_888 = __p0_888; \ + float64x1_t __s2_888 = __p2_888; \ + float64x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ + __ret_888 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_888, __p3_888), __rev0_888, __p1_888); \ + __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ + __ret_888; \ +}) +#endif + +#define vcopy_lane_p64(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ + poly64x1_t __ret_889; \ + poly64x1_t __s0_889 = __p0_889; \ + poly64x1_t __s2_889 = __p2_889; \ + __ret_889 = vset_lane_p64(vget_lane_p64(__s2_889, __p3_889), __s0_889, __p1_889); \ + __ret_889; \ +}) +#define vcopy_lane_f64(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ + float64x1_t __ret_890; \ + float64x1_t __s0_890 = __p0_890; \ + float64x1_t __s2_890 = __p2_890; \ + __ret_890 = vset_lane_f64(vget_lane_f64(__s2_890, __p3_890), __s0_890, __p1_890); \ + __ret_890; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p64(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ + poly64x2_t __ret_891; \ + poly64x2_t __s0_891 = __p0_891; \ + poly64x2_t __s2_891 = __p2_891; \ + __ret_891 = vsetq_lane_p64(vgetq_lane_p64(__s2_891, __p3_891), __s0_891, __p1_891); \ + __ret_891; \ +}) +#else +#define 
vcopyq_laneq_p64(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ + poly64x2_t __ret_892; \ + poly64x2_t __s0_892 = __p0_892; \ + poly64x2_t __s2_892 = __p2_892; \ + poly64x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ + poly64x2_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 1, 0); \ + __ret_892 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_892, __p3_892), __rev0_892, __p1_892); \ + __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ + __ret_892; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f64(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ + float64x2_t __ret_893; \ + float64x2_t __s0_893 = __p0_893; \ + float64x2_t __s2_893 = __p2_893; \ + __ret_893 = vsetq_lane_f64(vgetq_lane_f64(__s2_893, __p3_893), __s0_893, __p1_893); \ + __ret_893; \ +}) +#else +#define vcopyq_laneq_f64(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ + float64x2_t __ret_894; \ + float64x2_t __s0_894 = __p0_894; \ + float64x2_t __s2_894 = __p2_894; \ + float64x2_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 1, 0); \ + float64x2_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 1, 0); \ + __ret_894 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_894, __p3_894), __rev0_894, __p1_894); \ + __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 1, 0); \ + __ret_894; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p64(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ + poly64x1_t __ret_895; \ + poly64x1_t __s0_895 = __p0_895; \ + poly64x2_t __s2_895 = __p2_895; \ + __ret_895 = vset_lane_p64(vgetq_lane_p64(__s2_895, __p3_895), __s0_895, __p1_895); \ + __ret_895; \ +}) +#else +#define vcopy_laneq_p64(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ + poly64x1_t __ret_896; \ + poly64x1_t __s0_896 = __p0_896; \ + poly64x2_t __s2_896 = __p2_896; \ + poly64x2_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 1, 0); \ + __ret_896 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_896, __p3_896), __s0_896, __p1_896); \ + __ret_896; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f64(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ + float64x1_t __ret_897; \ + float64x1_t __s0_897 = __p0_897; \ + float64x2_t __s2_897 = __p2_897; \ + __ret_897 = vset_lane_f64(vgetq_lane_f64(__s2_897, __p3_897), __s0_897, __p1_897); \ + __ret_897; \ +}) +#else +#define vcopy_laneq_f64(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ + float64x1_t __ret_898; \ + float64x1_t __s0_898 = __p0_898; \ + float64x2_t __s2_898 = __p2_898; \ + float64x2_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 1, 0); \ + __ret_898 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_898, __p3_898), __s0_898, __p1_898); \ + __ret_898; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} 
+#else +__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#define vmulx_lane_f64(__p0_899, __p1_899, __p2_899) __extension__ ({ \ + float64x1_t __ret_899; \ + float64x1_t __s0_899 = __p0_899; \ + float64x1_t __s1_899 = __p1_899; \ + float64_t __x_899 = vget_lane_f64(__s0_899, 0); \ + float64_t __y_899 = 
vget_lane_f64(__s1_899, __p2_899); \ + float64_t __z_899 = vmulxd_f64(__x_899, __y_899); \ + __ret_899 = vset_lane_f64(__z_899, __s0_899, __p2_899); \ + __ret_899; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f64(__p0_900, __p1_900, __p2_900) __extension__ ({ \ + float64x1_t __ret_900; \ + float64x1_t __s0_900 = __p0_900; \ + float64x2_t __s1_900 = __p1_900; \ + float64_t __x_900 = vget_lane_f64(__s0_900, 0); \ + float64_t __y_900 = vgetq_lane_f64(__s1_900, __p2_900); \ + float64_t __z_900 = vmulxd_f64(__x_900, __y_900); \ + __ret_900 = vset_lane_f64(__z_900, __s0_900, 0); \ + __ret_900; \ +}) +#else +#define vmulx_laneq_f64(__p0_901, __p1_901, __p2_901) __extension__ ({ \ + float64x1_t __ret_901; \ + float64x1_t __s0_901 = __p0_901; \ + float64x2_t __s1_901 = __p1_901; \ + float64x2_t __rev1_901; __rev1_901 = __builtin_shufflevector(__s1_901, __s1_901, 1, 0); \ + float64_t __x_901 = vget_lane_f64(__s0_901, 0); \ + float64_t __y_901 = __noswap_vgetq_lane_f64(__rev1_901, __p2_901); \ + float64_t __z_901 = vmulxd_f64(__x_901, __y_901); \ + __ret_901 = vset_lane_f64(__z_901, __s0_901, 0); \ + __ret_901; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ + float32x4_t __ret_902; \ + float32x4_t __s0_902 = __p0_902; \ + float16x8_t __s1_902 = __p1_902; \ + float16x4_t __s2_902 = __p2_902; \ + __ret_902 = vfmlalq_high_f16(__s0_902, __s1_902, (float16x8_t) {vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902)}); \ + __ret_902; \ +}) +#else +#define vfmlalq_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ + float32x4_t __ret_903; \ + float32x4_t __s0_903 = __p0_903; \ + float16x8_t __s1_903 = __p1_903; \ + float16x4_t __s2_903 = __p2_903; \ + float32x4_t __rev0_903; __rev0_903 = __builtin_shufflevector(__s0_903, __s0_903, 3, 2, 1, 0); \ + float16x8_t __rev1_903; __rev1_903 = __builtin_shufflevector(__s1_903, __s1_903, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_903; __rev2_903 = __builtin_shufflevector(__s2_903, __s2_903, 3, 2, 1, 0); \ + __ret_903 = __noswap_vfmlalq_high_f16(__rev0_903, __rev1_903, (float16x8_t) {__noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903)}); \ + __ret_903 = __builtin_shufflevector(__ret_903, __ret_903, 3, 2, 1, 0); \ + __ret_903; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ + float32x2_t __ret_904; \ + float32x2_t __s0_904 = __p0_904; \ + float16x4_t __s1_904 = __p1_904; \ + float16x4_t __s2_904 = __p2_904; \ + __ret_904 = vfmlal_high_f16(__s0_904, __s1_904, (float16x4_t) {vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904)}); \ + __ret_904; \ +}) +#else +#define vfmlal_lane_high_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ + float32x2_t __ret_905; \ + float32x2_t __s0_905 = __p0_905; \ + float16x4_t __s1_905 = __p1_905; \ + float16x4_t 
__s2_905 = __p2_905; \ + float32x2_t __rev0_905; __rev0_905 = __builtin_shufflevector(__s0_905, __s0_905, 1, 0); \ + float16x4_t __rev1_905; __rev1_905 = __builtin_shufflevector(__s1_905, __s1_905, 3, 2, 1, 0); \ + float16x4_t __rev2_905; __rev2_905 = __builtin_shufflevector(__s2_905, __s2_905, 3, 2, 1, 0); \ + __ret_905 = __noswap_vfmlal_high_f16(__rev0_905, __rev1_905, (float16x4_t) {__noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905)}); \ + __ret_905 = __builtin_shufflevector(__ret_905, __ret_905, 1, 0); \ + __ret_905; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ + float32x4_t __ret_906; \ + float32x4_t __s0_906 = __p0_906; \ + float16x8_t __s1_906 = __p1_906; \ + float16x4_t __s2_906 = __p2_906; \ + __ret_906 = vfmlalq_low_f16(__s0_906, __s1_906, (float16x8_t) {vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906)}); \ + __ret_906; \ +}) +#else +#define vfmlalq_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ + float32x4_t __ret_907; \ + float32x4_t __s0_907 = __p0_907; \ + float16x8_t __s1_907 = __p1_907; \ + float16x4_t __s2_907 = __p2_907; \ + float32x4_t __rev0_907; __rev0_907 = __builtin_shufflevector(__s0_907, __s0_907, 3, 2, 1, 0); \ + float16x8_t __rev1_907; __rev1_907 = __builtin_shufflevector(__s1_907, __s1_907, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_907; __rev2_907 = __builtin_shufflevector(__s2_907, __s2_907, 3, 2, 1, 0); \ + __ret_907 = __noswap_vfmlalq_low_f16(__rev0_907, __rev1_907, (float16x8_t) {__noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907)}); \ + __ret_907 = __builtin_shufflevector(__ret_907, __ret_907, 3, 2, 1, 0); \ + __ret_907; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ + float32x2_t __ret_908; \ + float32x2_t __s0_908 = __p0_908; \ + float16x4_t __s1_908 = __p1_908; \ + float16x4_t __s2_908 = __p2_908; \ + __ret_908 = vfmlal_low_f16(__s0_908, __s1_908, (float16x4_t) {vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908)}); \ + __ret_908; \ +}) +#else +#define vfmlal_lane_low_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ + float32x2_t __ret_909; \ + float32x2_t __s0_909 = __p0_909; \ + float16x4_t __s1_909 = __p1_909; \ + float16x4_t __s2_909 = __p2_909; \ + float32x2_t __rev0_909; __rev0_909 = __builtin_shufflevector(__s0_909, __s0_909, 1, 0); \ + float16x4_t __rev1_909; __rev1_909 = __builtin_shufflevector(__s1_909, __s1_909, 3, 2, 1, 0); \ + float16x4_t __rev2_909; __rev2_909 = __builtin_shufflevector(__s2_909, __s2_909, 3, 2, 1, 0); \ + __ret_909 = __noswap_vfmlal_low_f16(__rev0_909, __rev1_909, (float16x4_t) {__noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, 
__p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909)}); \ + __ret_909 = __builtin_shufflevector(__ret_909, __ret_909, 1, 0); \ + __ret_909; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ + float32x4_t __ret_910; \ + float32x4_t __s0_910 = __p0_910; \ + float16x8_t __s1_910 = __p1_910; \ + float16x8_t __s2_910 = __p2_910; \ + __ret_910 = vfmlalq_high_f16(__s0_910, __s1_910, (float16x8_t) {vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910)}); \ + __ret_910; \ +}) +#else +#define vfmlalq_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ + float32x4_t __ret_911; \ + float32x4_t __s0_911 = __p0_911; \ + float16x8_t __s1_911 = __p1_911; \ + float16x8_t __s2_911 = __p2_911; \ + float32x4_t __rev0_911; __rev0_911 = __builtin_shufflevector(__s0_911, __s0_911, 3, 2, 1, 0); \ + float16x8_t __rev1_911; __rev1_911 = __builtin_shufflevector(__s1_911, __s1_911, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_911; __rev2_911 = __builtin_shufflevector(__s2_911, __s2_911, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_911 = __noswap_vfmlalq_high_f16(__rev0_911, __rev1_911, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911)}); \ + __ret_911 = __builtin_shufflevector(__ret_911, __ret_911, 3, 2, 1, 0); \ + __ret_911; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ + float32x2_t __ret_912; \ + float32x2_t __s0_912 = __p0_912; \ + float16x4_t __s1_912 = __p1_912; \ + float16x8_t __s2_912 = __p2_912; \ + __ret_912 = vfmlal_high_f16(__s0_912, __s1_912, (float16x4_t) {vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912)}); \ + __ret_912; \ +}) +#else +#define vfmlal_laneq_high_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ + float32x2_t __ret_913; \ + float32x2_t __s0_913 = __p0_913; \ + float16x4_t __s1_913 = __p1_913; \ + float16x8_t __s2_913 = __p2_913; \ + float32x2_t __rev0_913; __rev0_913 = __builtin_shufflevector(__s0_913, __s0_913, 1, 0); \ + float16x4_t __rev1_913; __rev1_913 = __builtin_shufflevector(__s1_913, __s1_913, 3, 2, 1, 0); \ + float16x8_t __rev2_913; __rev2_913 = __builtin_shufflevector(__s2_913, __s2_913, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_913 = __noswap_vfmlal_high_f16(__rev0_913, __rev1_913, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913)}); \ + __ret_913 = __builtin_shufflevector(__ret_913, __ret_913, 1, 0); \ + __ret_913; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ + float32x4_t __ret_914; \ + float32x4_t __s0_914 = __p0_914; \ + float16x8_t __s1_914 = 
__p1_914; \ + float16x8_t __s2_914 = __p2_914; \ + __ret_914 = vfmlalq_low_f16(__s0_914, __s1_914, (float16x8_t) {vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914)}); \ + __ret_914; \ +}) +#else +#define vfmlalq_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ + float32x4_t __ret_915; \ + float32x4_t __s0_915 = __p0_915; \ + float16x8_t __s1_915 = __p1_915; \ + float16x8_t __s2_915 = __p2_915; \ + float32x4_t __rev0_915; __rev0_915 = __builtin_shufflevector(__s0_915, __s0_915, 3, 2, 1, 0); \ + float16x8_t __rev1_915; __rev1_915 = __builtin_shufflevector(__s1_915, __s1_915, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_915; __rev2_915 = __builtin_shufflevector(__s2_915, __s2_915, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_915 = __noswap_vfmlalq_low_f16(__rev0_915, __rev1_915, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915)}); \ + __ret_915 = __builtin_shufflevector(__ret_915, __ret_915, 3, 2, 1, 0); \ + __ret_915; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ + float32x2_t __ret_916; \ + float32x2_t __s0_916 = __p0_916; \ + float16x4_t __s1_916 = __p1_916; \ + float16x8_t __s2_916 = __p2_916; \ + __ret_916 = vfmlal_low_f16(__s0_916, __s1_916, (float16x4_t) {vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916)}); \ + __ret_916; \ +}) +#else +#define vfmlal_laneq_low_f16(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \ + float32x2_t __ret_917; \ + float32x2_t __s0_917 = __p0_917; \ + float16x4_t __s1_917 = __p1_917; \ + float16x8_t __s2_917 = __p2_917; \ + float32x2_t __rev0_917; __rev0_917 = __builtin_shufflevector(__s0_917, __s0_917, 1, 0); \ + float16x4_t __rev1_917; __rev1_917 = __builtin_shufflevector(__s1_917, __s1_917, 3, 2, 1, 0); \ + float16x8_t __rev2_917; __rev2_917 = __builtin_shufflevector(__s2_917, __s2_917, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_917 = __noswap_vfmlal_low_f16(__rev0_917, __rev1_917, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917)}); \ + __ret_917 = __builtin_shufflevector(__ret_917, __ret_917, 1, 0); \ + __ret_917; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_high_f16(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \ + float32x4_t __ret_918; \ + float32x4_t __s0_918 = __p0_918; \ + float16x8_t __s1_918 = __p1_918; \ + float16x4_t __s2_918 = __p2_918; \ + __ret_918 = vfmlslq_high_f16(__s0_918, __s1_918, (float16x8_t) {vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918)}); \ + __ret_918; \ +}) +#else 
+#define vfmlslq_lane_high_f16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \ + float32x4_t __ret_919; \ + float32x4_t __s0_919 = __p0_919; \ + float16x8_t __s1_919 = __p1_919; \ + float16x4_t __s2_919 = __p2_919; \ + float32x4_t __rev0_919; __rev0_919 = __builtin_shufflevector(__s0_919, __s0_919, 3, 2, 1, 0); \ + float16x8_t __rev1_919; __rev1_919 = __builtin_shufflevector(__s1_919, __s1_919, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_919; __rev2_919 = __builtin_shufflevector(__s2_919, __s2_919, 3, 2, 1, 0); \ + __ret_919 = __noswap_vfmlslq_high_f16(__rev0_919, __rev1_919, (float16x8_t) {__noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919)}); \ + __ret_919 = __builtin_shufflevector(__ret_919, __ret_919, 3, 2, 1, 0); \ + __ret_919; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_high_f16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \ + float32x2_t __ret_920; \ + float32x2_t __s0_920 = __p0_920; \ + float16x4_t __s1_920 = __p1_920; \ + float16x4_t __s2_920 = __p2_920; \ + __ret_920 = vfmlsl_high_f16(__s0_920, __s1_920, (float16x4_t) {vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920)}); \ + __ret_920; \ +}) +#else +#define vfmlsl_lane_high_f16(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ + float32x2_t __ret_921; \ + float32x2_t __s0_921 = __p0_921; \ + float16x4_t __s1_921 = __p1_921; \ + float16x4_t __s2_921 = __p2_921; \ + float32x2_t __rev0_921; __rev0_921 = __builtin_shufflevector(__s0_921, __s0_921, 1, 0); \ + float16x4_t __rev1_921; __rev1_921 = __builtin_shufflevector(__s1_921, __s1_921, 3, 2, 1, 0); \ + float16x4_t __rev2_921; __rev2_921 = __builtin_shufflevector(__s2_921, __s2_921, 3, 2, 1, 0); \ + __ret_921 = __noswap_vfmlsl_high_f16(__rev0_921, __rev1_921, (float16x4_t) {__noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921)}); \ + __ret_921 = __builtin_shufflevector(__ret_921, __ret_921, 1, 0); \ + __ret_921; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_low_f16(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ + float32x4_t __ret_922; \ + float32x4_t __s0_922 = __p0_922; \ + float16x8_t __s1_922 = __p1_922; \ + float16x4_t __s2_922 = __p2_922; \ + __ret_922 = vfmlslq_low_f16(__s0_922, __s1_922, (float16x8_t) {vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922)}); \ + __ret_922; \ +}) +#else +#define vfmlslq_lane_low_f16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ + float32x4_t __ret_923; \ + float32x4_t __s0_923 = __p0_923; \ + float16x8_t __s1_923 = __p1_923; \ + float16x4_t __s2_923 = __p2_923; \ + float32x4_t __rev0_923; __rev0_923 = __builtin_shufflevector(__s0_923, __s0_923, 3, 2, 1, 0); \ + float16x8_t __rev1_923; __rev1_923 = __builtin_shufflevector(__s1_923, __s1_923, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_923; 
__rev2_923 = __builtin_shufflevector(__s2_923, __s2_923, 3, 2, 1, 0); \ + __ret_923 = __noswap_vfmlslq_low_f16(__rev0_923, __rev1_923, (float16x8_t) {__noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923)}); \ + __ret_923 = __builtin_shufflevector(__ret_923, __ret_923, 3, 2, 1, 0); \ + __ret_923; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_low_f16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ + float32x2_t __ret_924; \ + float32x2_t __s0_924 = __p0_924; \ + float16x4_t __s1_924 = __p1_924; \ + float16x4_t __s2_924 = __p2_924; \ + __ret_924 = vfmlsl_low_f16(__s0_924, __s1_924, (float16x4_t) {vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924)}); \ + __ret_924; \ +}) +#else +#define vfmlsl_lane_low_f16(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ + float32x2_t __ret_925; \ + float32x2_t __s0_925 = __p0_925; \ + float16x4_t __s1_925 = __p1_925; \ + float16x4_t __s2_925 = __p2_925; \ + float32x2_t __rev0_925; __rev0_925 = __builtin_shufflevector(__s0_925, __s0_925, 1, 0); \ + float16x4_t __rev1_925; __rev1_925 = __builtin_shufflevector(__s1_925, __s1_925, 3, 2, 1, 0); \ + float16x4_t __rev2_925; __rev2_925 = __builtin_shufflevector(__s2_925, __s2_925, 3, 2, 1, 0); \ + __ret_925 = __noswap_vfmlsl_low_f16(__rev0_925, __rev1_925, (float16x4_t) {__noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925)}); \ + __ret_925 = __builtin_shufflevector(__ret_925, __ret_925, 1, 0); \ + __ret_925; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_high_f16(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ + float32x4_t __ret_926; \ + float32x4_t __s0_926 = __p0_926; \ + float16x8_t __s1_926 = __p1_926; \ + float16x8_t __s2_926 = __p2_926; \ + __ret_926 = vfmlslq_high_f16(__s0_926, __s1_926, (float16x8_t) {vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926)}); \ + __ret_926; \ +}) +#else +#define vfmlslq_laneq_high_f16(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ + float32x4_t __ret_927; \ + float32x4_t __s0_927 = __p0_927; \ + float16x8_t __s1_927 = __p1_927; \ + float16x8_t __s2_927 = __p2_927; \ + float32x4_t __rev0_927; __rev0_927 = __builtin_shufflevector(__s0_927, __s0_927, 3, 2, 1, 0); \ + float16x8_t __rev1_927; __rev1_927 = __builtin_shufflevector(__s1_927, __s1_927, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_927; __rev2_927 = __builtin_shufflevector(__s2_927, __s2_927, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_927 = __noswap_vfmlslq_high_f16(__rev0_927, __rev1_927, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), 
__noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927)}); \ + __ret_927 = __builtin_shufflevector(__ret_927, __ret_927, 3, 2, 1, 0); \ + __ret_927; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_high_f16(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ + float32x2_t __ret_928; \ + float32x2_t __s0_928 = __p0_928; \ + float16x4_t __s1_928 = __p1_928; \ + float16x8_t __s2_928 = __p2_928; \ + __ret_928 = vfmlsl_high_f16(__s0_928, __s1_928, (float16x4_t) {vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928)}); \ + __ret_928; \ +}) +#else +#define vfmlsl_laneq_high_f16(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ + float32x2_t __ret_929; \ + float32x2_t __s0_929 = __p0_929; \ + float16x4_t __s1_929 = __p1_929; \ + float16x8_t __s2_929 = __p2_929; \ + float32x2_t __rev0_929; __rev0_929 = __builtin_shufflevector(__s0_929, __s0_929, 1, 0); \ + float16x4_t __rev1_929; __rev1_929 = __builtin_shufflevector(__s1_929, __s1_929, 3, 2, 1, 0); \ + float16x8_t __rev2_929; __rev2_929 = __builtin_shufflevector(__s2_929, __s2_929, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_929 = __noswap_vfmlsl_high_f16(__rev0_929, __rev1_929, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929)}); \ + __ret_929 = __builtin_shufflevector(__ret_929, __ret_929, 1, 0); \ + __ret_929; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_low_f16(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ + float32x4_t __ret_930; \ + float32x4_t __s0_930 = __p0_930; \ + float16x8_t __s1_930 = __p1_930; \ + float16x8_t __s2_930 = __p2_930; \ + __ret_930 = vfmlslq_low_f16(__s0_930, __s1_930, (float16x8_t) {vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930)}); \ + __ret_930; \ +}) +#else +#define vfmlslq_laneq_low_f16(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ + float32x4_t __ret_931; \ + float32x4_t __s0_931 = __p0_931; \ + float16x8_t __s1_931 = __p1_931; \ + float16x8_t __s2_931 = __p2_931; \ + float32x4_t __rev0_931; __rev0_931 = __builtin_shufflevector(__s0_931, __s0_931, 3, 2, 1, 0); \ + float16x8_t __rev1_931; __rev1_931 = __builtin_shufflevector(__s1_931, __s1_931, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_931; __rev2_931 = __builtin_shufflevector(__s2_931, __s2_931, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_931 = __noswap_vfmlslq_low_f16(__rev0_931, __rev1_931, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931)}); \ + __ret_931 = __builtin_shufflevector(__ret_931, __ret_931, 3, 2, 1, 0); \ + __ret_931; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_low_f16(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ + float32x2_t __ret_932; \ + float32x2_t __s0_932 = __p0_932; \ + float16x4_t __s1_932 = __p1_932; \ + 
float16x8_t __s2_932 = __p2_932; \ + __ret_932 = vfmlsl_low_f16(__s0_932, __s1_932, (float16x4_t) {vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932)}); \ + __ret_932; \ +}) +#else +#define vfmlsl_laneq_low_f16(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ + float32x2_t __ret_933; \ + float32x2_t __s0_933 = __p0_933; \ + float16x4_t __s1_933 = __p1_933; \ + float16x8_t __s2_933 = __p2_933; \ + float32x2_t __rev0_933; __rev0_933 = __builtin_shufflevector(__s0_933, __s0_933, 1, 0); \ + float16x4_t __rev1_933; __rev1_933 = __builtin_shufflevector(__s1_933, __s1_933, 3, 2, 1, 0); \ + float16x8_t __rev2_933; __rev2_933 = __builtin_shufflevector(__s2_933, __s2_933, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_933 = __noswap_vfmlsl_low_f16(__rev0_933, __rev1_933, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933)}); \ + __ret_933 = __builtin_shufflevector(__ret_933, __ret_933, 1, 0); \ + __ret_933; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_lane_f16(__p0_934, __p1_934, __p2_934) __extension__ ({ \ + float16_t __ret_934; \ + float16_t __s0_934 = __p0_934; \ + float16x4_t __s1_934 = __p1_934; \ + __ret_934 = __s0_934 * vget_lane_f16(__s1_934, __p2_934); \ + __ret_934; \ +}) +#else +#define vmulh_lane_f16(__p0_935, __p1_935, __p2_935) __extension__ ({ \ + float16_t __ret_935; \ + float16_t __s0_935 = __p0_935; \ + float16x4_t __s1_935 = __p1_935; \ + float16x4_t __rev1_935; __rev1_935 = __builtin_shufflevector(__s1_935, __s1_935, 3, 2, 1, 0); \ + __ret_935 = __s0_935 * __noswap_vget_lane_f16(__rev1_935, __p2_935); \ + __ret_935; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_laneq_f16(__p0_936, __p1_936, __p2_936) __extension__ ({ \ + float16_t __ret_936; \ + float16_t __s0_936 = __p0_936; \ + float16x8_t __s1_936 = __p1_936; \ + __ret_936 = __s0_936 * vgetq_lane_f16(__s1_936, __p2_936); \ + __ret_936; \ +}) +#else +#define vmulh_laneq_f16(__p0_937, __p1_937, __p2_937) __extension__ ({ \ + float16_t __ret_937; \ + float16_t __s0_937 = __p0_937; \ + float16x8_t __s1_937 = __p1_937; \ + float16x8_t __rev1_937; __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_937 = __s0_937 * __noswap_vgetq_lane_f16(__rev1_937, __p2_937); \ + __ret_937; \ +}) +#endif + +#endif +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdl_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vabdl_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vabdl_u32(__p1, __p2); + 
return __ret; +} +#else +__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vabdl_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdl_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vabdl_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdl_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vabdl_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vabdl_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vabdl_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vabdl_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vabdl_s16(__p1, __p2); + return __ret; +} +#endif + +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
__noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif + +#undef __ai + +#endif /* if !defined(__ARM_NEON) */ +#endif /* ifndef __ARM_FP */ +#endif /* __ARM_NEON_H */ diff --git a/software/lib/sw_lib/devices/include/arm_vector_types.h b/software/lib/sw_lib/devices/include/arm_vector_types.h new file mode 100644 index 0000000000000000000000000000000000000000..b0dd66b07f9b5be9d86279853a8ea296bd48643d --- /dev/null +++ b/software/lib/sw_lib/devices/include/arm_vector_types.h @@ -0,0 +1,345 @@ +/*===---- arm_vector_types - ARM vector type ------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined(__ARM_NEON_H) && !defined(__ARM_SVE_H) +#error "This file should not be used standalone. 
Please include arm_neon.h or arm_sve.h instead" + +#endif +#ifndef __ARM_NEON_TYPES_H +#define __ARM_NEON_TYPES_H +typedef float float32_t; +typedef __fp16 float16_t; +#ifdef __aarch64__ +typedef double float64_t; +#endif + +typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t; +typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t; +typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t; +typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t; +typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t; +typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t; +typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t; +typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t; +typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t; +typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t; +typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t; +typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t; +typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t; +typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t; +typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t; +typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t; +typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t; +typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t; +typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; +typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; +#ifdef __aarch64__ +typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; +typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; +#endif + +typedef struct int8x8x2_t { + int8x8_t val[2]; +} int8x8x2_t; + +typedef struct int8x16x2_t { + int8x16_t val[2]; +} int8x16x2_t; + +typedef struct int16x4x2_t { + int16x4_t val[2]; +} int16x4x2_t; + +typedef struct int16x8x2_t { + int16x8_t val[2]; +} int16x8x2_t; + +typedef struct int32x2x2_t { + int32x2_t val[2]; +} int32x2x2_t; + +typedef struct int32x4x2_t { + int32x4_t val[2]; +} int32x4x2_t; + +typedef struct int64x1x2_t { + int64x1_t val[2]; +} int64x1x2_t; + +typedef struct int64x2x2_t { + int64x2_t val[2]; +} int64x2x2_t; + +typedef struct uint8x8x2_t { + uint8x8_t val[2]; +} uint8x8x2_t; + +typedef struct uint8x16x2_t { + uint8x16_t val[2]; +} uint8x16x2_t; + +typedef struct uint16x4x2_t { + uint16x4_t val[2]; +} uint16x4x2_t; + +typedef struct uint16x8x2_t { + uint16x8_t val[2]; +} uint16x8x2_t; + +typedef struct uint32x2x2_t { + uint32x2_t val[2]; +} uint32x2x2_t; + +typedef struct uint32x4x2_t { + uint32x4_t val[2]; +} uint32x4x2_t; + +typedef struct uint64x1x2_t { + uint64x1_t val[2]; +} uint64x1x2_t; + +typedef struct uint64x2x2_t { + uint64x2_t val[2]; +} uint64x2x2_t; + +typedef struct float16x4x2_t { + float16x4_t val[2]; +} float16x4x2_t; + +typedef struct float16x8x2_t { + float16x8_t val[2]; +} float16x8x2_t; + +typedef struct float32x2x2_t { + float32x2_t val[2]; +} float32x2x2_t; + +typedef struct float32x4x2_t { + float32x4_t val[2]; +} float32x4x2_t; + +#ifdef __aarch64__ +typedef struct float64x1x2_t { + float64x1_t val[2]; +} float64x1x2_t; + +typedef struct float64x2x2_t { + float64x2_t val[2]; +} float64x2x2_t; + +#endif +typedef struct int8x8x3_t { + int8x8_t val[3]; +} int8x8x3_t; + +typedef struct int8x16x3_t { + int8x16_t val[3]; +} int8x16x3_t; + +typedef struct int16x4x3_t { + int16x4_t val[3]; +} int16x4x3_t; + +typedef struct int16x8x3_t { + int16x8_t val[3]; +} 
int16x8x3_t; + +typedef struct int32x2x3_t { + int32x2_t val[3]; +} int32x2x3_t; + +typedef struct int32x4x3_t { + int32x4_t val[3]; +} int32x4x3_t; + +typedef struct int64x1x3_t { + int64x1_t val[3]; +} int64x1x3_t; + +typedef struct int64x2x3_t { + int64x2_t val[3]; +} int64x2x3_t; + +typedef struct uint8x8x3_t { + uint8x8_t val[3]; +} uint8x8x3_t; + +typedef struct uint8x16x3_t { + uint8x16_t val[3]; +} uint8x16x3_t; + +typedef struct uint16x4x3_t { + uint16x4_t val[3]; +} uint16x4x3_t; + +typedef struct uint16x8x3_t { + uint16x8_t val[3]; +} uint16x8x3_t; + +typedef struct uint32x2x3_t { + uint32x2_t val[3]; +} uint32x2x3_t; + +typedef struct uint32x4x3_t { + uint32x4_t val[3]; +} uint32x4x3_t; + +typedef struct uint64x1x3_t { + uint64x1_t val[3]; +} uint64x1x3_t; + +typedef struct uint64x2x3_t { + uint64x2_t val[3]; +} uint64x2x3_t; + +typedef struct float16x4x3_t { + float16x4_t val[3]; +} float16x4x3_t; + +typedef struct float16x8x3_t { + float16x8_t val[3]; +} float16x8x3_t; + +typedef struct float32x2x3_t { + float32x2_t val[3]; +} float32x2x3_t; + +typedef struct float32x4x3_t { + float32x4_t val[3]; +} float32x4x3_t; + +#ifdef __aarch64__ +typedef struct float64x1x3_t { + float64x1_t val[3]; +} float64x1x3_t; + +typedef struct float64x2x3_t { + float64x2_t val[3]; +} float64x2x3_t; + +#endif +typedef struct int8x8x4_t { + int8x8_t val[4]; +} int8x8x4_t; + +typedef struct int8x16x4_t { + int8x16_t val[4]; +} int8x16x4_t; + +typedef struct int16x4x4_t { + int16x4_t val[4]; +} int16x4x4_t; + +typedef struct int16x8x4_t { + int16x8_t val[4]; +} int16x8x4_t; + +typedef struct int32x2x4_t { + int32x2_t val[4]; +} int32x2x4_t; + +typedef struct int32x4x4_t { + int32x4_t val[4]; +} int32x4x4_t; + +typedef struct int64x1x4_t { + int64x1_t val[4]; +} int64x1x4_t; + +typedef struct int64x2x4_t { + int64x2_t val[4]; +} int64x2x4_t; + +typedef struct uint8x8x4_t { + uint8x8_t val[4]; +} uint8x8x4_t; + +typedef struct uint8x16x4_t { + uint8x16_t val[4]; +} uint8x16x4_t; + +typedef struct uint16x4x4_t { + uint16x4_t val[4]; +} uint16x4x4_t; + +typedef struct uint16x8x4_t { + uint16x8_t val[4]; +} uint16x8x4_t; + +typedef struct uint32x2x4_t { + uint32x2_t val[4]; +} uint32x2x4_t; + +typedef struct uint32x4x4_t { + uint32x4_t val[4]; +} uint32x4x4_t; + +typedef struct uint64x1x4_t { + uint64x1_t val[4]; +} uint64x1x4_t; + +typedef struct uint64x2x4_t { + uint64x2_t val[4]; +} uint64x2x4_t; + +typedef struct float16x4x4_t { + float16x4_t val[4]; +} float16x4x4_t; + +typedef struct float16x8x4_t { + float16x8_t val[4]; +} float16x8x4_t; + +typedef struct float32x2x4_t { + float32x2_t val[4]; +} float32x2x4_t; + +typedef struct float32x4x4_t { + float32x4_t val[4]; +} float32x4x4_t; + +#ifdef __aarch64__ +typedef struct float64x1x4_t { + float64x1_t val[4]; +} float64x1x4_t; + +typedef struct float64x2x4_t { + float64x2_t val[4]; +} float64x2x4_t; + +#endif +typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t; +typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t; + +typedef struct bfloat16x4x2_t { + bfloat16x4_t val[2]; +} bfloat16x4x2_t; + +typedef struct bfloat16x8x2_t { + bfloat16x8_t val[2]; +} bfloat16x8x2_t; + +typedef struct bfloat16x4x3_t { + bfloat16x4_t val[3]; +} bfloat16x4x3_t; + +typedef struct bfloat16x8x3_t { + bfloat16x8_t val[3]; +} bfloat16x8x3_t; + +typedef struct bfloat16x4x4_t { + bfloat16x4_t val[4]; +} bfloat16x4x4_t; + +typedef struct bfloat16x8x4_t { + bfloat16x8_t val[4]; +} bfloat16x8x4_t; + +#endif // __ARM_NEON_TYPES_H diff --git 
a/software/lib/sw_lib/devices/include/gic400.h b/software/lib/sw_lib/devices/include/gic400.h new file mode 100755 index 0000000000000000000000000000000000000000..1b2f47a75c26d749f309b6ca3e27a52f0f67ad7f --- /dev/null +++ b/software/lib/sw_lib/devices/include/gic400.h @@ -0,0 +1,161 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#ifndef __GIC400_H__ +#define __GIC400_H__ + +#include <stdio.h> + +#define ENABLE_GROUP0 1U << 0 +#define ENABLE_GROUP1 1U << 1 + +/* Register Definitions*/ + +struct set_and_clear_regs +{ + volatile unsigned int set[32], clear[32]; +}; + +typedef struct +{ + volatile unsigned int control; /* 0x000 */ + volatile unsigned const int controller_type; /* 0x004 */ + volatile unsigned int iidr; /* 0x008 */ + const unsigned int padding1[29]; /* 0x010 */ + unsigned int group[32]; /* 0x080 */ + struct set_and_clear_regs enable; /* 0x100 */ + struct set_and_clear_regs pending; /* 0x200 */ + struct set_and_clear_regs active; /* 0x300 */ + volatile unsigned int priority[256]; /* 0x400 */ + volatile unsigned int target[256]; /* 0x800 */ + volatile unsigned int configuration[64]; /* 0xC00 */ + const char padding2[512]; /* 0xD00 */ + volatile unsigned int software_interrupt; /* 0xF00 */ + const char padding3[207]; + volatile unsigned const int pid4; /* 0xFD0 */ + const char padding4[12]; + volatile unsigned const int pid[4]; /* 0xFE0 */ + volatile unsigned const int cid[4]; /* 0xFF0 */ +} interrupt_distributor; + +typedef struct +{ + volatile unsigned int control; /* 0x00 */ + volatile unsigned int priority_mask; /* 0x04 */ + volatile unsigned int binary_point; /* 0x08 */ + volatile unsigned const int interrupt_ack; /* 0x0c */ + volatile unsigned int end_of_interrupt; /* 0x10 */ + volatile unsigned const int running_priority; /* 0x14 */ + volatile unsigned const int highest_pending; /* 0x18 */ + volatile unsigned int aliased_binary_point; /* 0x1C */ + volatile unsigned int aliased_interrupt_ack; /* 0x20 */ + volatile unsigned int aliased_end_of_interript; /* 0x24 */ + +} cpu_interface; + + +typedef struct +{ + volatile unsigned int hcr; /* 0x00 */ + volatile unsigned int vtr; /* 0x04 */ + volatile unsigned int vmcr; /* 0x08 */ + volatile unsigned int padding1; /* 0x0c */ + volatile unsigned int misr; /* 0x10 */ + volatile unsigned int padding2[3]; + volatile unsigned int eisr0; /* 0x20 */ + volatile unsigned int eisr1; /* 0x24 */ + volatile unsigned int padding3[2]; + volatile unsigned int elsr0; /* 0x30 */ + volatile unsigned int elsr1; /* 0x34 */ + volatile unsigned int padding4[2]; + volatile unsigned int padding5[48]; + volatile unsigned int lr[64]; /* 0x100 */ +}virtual_interface_ctrl; + + +typedef struct +{ + volatile unsigned int ctlr; /* 0x00 */ + volatile unsigned int pmr; /* 0x04 */ + volatile unsigned int bpr; /* 0x08 */ + volatile 
unsigned int iar; /* 0x0c */ + volatile unsigned int eoir; /* 0x10 */ + volatile unsigned int rpr; /* 0x14 */ + volatile unsigned int hppir; /* 0x18 */ + volatile unsigned int abpr; /* 0x1c */ + volatile unsigned int aiar; /* 0x20 */ + volatile unsigned int aeoir; /* 0x24 */ + volatile unsigned int ahppir; /* 0x28 */ + volatile unsigned int padding1; /* 0x2C */ + volatile unsigned int padding2[1012]; + volatile unsigned int dir; /* 0x1000 */ + +} virtual_cpu_interface; + +extern void (*wrapped_irq_vector[][4])(void); + +void irq_handler(void); + + +/* Function prototype - interrupt handler function prototype */ +typedef void (*IntHandler)(int, int); + +/* Function prototype - legacy gic initialise function */ +void gic_initialise (void); + +/* Function prototype - install interrupt handler */ +void gic_install_handler (unsigned int, IntHandler); + +/* Function prototype - enable interrupt */ +void gic_enable_interrupt (unsigned int); + +/* Function prototype - disable interrupt */ +void gic_disable_interrupt (unsigned int); + +/* Function prototype - virtual irq handler */ +void call_handler_virq(unsigned int, unsigned int); + +/* Function prototype - returns interrupt type */ +void gic_interrupt_type(unsigned int, unsigned int); + +/* Function prototype - interrupt handler */ +void irq_handler(void); + +/* Function prototype - target the cpu in multi-cluster multi-cpu environment */ +void intr_target_cpu(unsigned int interrupt, unsigned int cpu, unsigned int set); + +/* Function prototype - set the interrupt security */ +void intr_security_set(unsigned int interrupt, unsigned int set); + +/* Function prototype - set the interrupt security */ +void set_intr_conf(unsigned int int_num, unsigned int int_val); + +/* Function prototype - set the interrupt priority and priority mask */ +void set_intr_priority_mask(unsigned int intr_num, unsigned int intr_pri, unsigned int pri_mask); + +/* Function prototype - set the cpu interface and distributor */ +void gic_init_ci_di(unsigned int fiq_enable); + +/* Function prototype - Gic improved initialise method */ +void gic_initialise_intr(unsigned int interrupt, unsigned int cpu, unsigned int set, unsigned int fiq_enable); + +void gic_read_pids(); +void gic_set_active_interrupt(unsigned int); +void gic_disable_cpu_interface(void); + + +#endif diff --git a/software/lib/sw_lib/devices/include/global_defines.h b/software/lib/sw_lib/devices/include/global_defines.h new file mode 100755 index 0000000000000000000000000000000000000000..c124df3ad440130ff409cbca7c1aa1ef1c136e6e --- /dev/null +++ b/software/lib/sw_lib/devices/include/global_defines.h @@ -0,0 +1,42 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
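Editor's note, not part of the patch: gic400.h above exposes a small C API whose implementation follows in gic400.c later in this diff. As an illustration only, a minimal bring-up for one peripheral interrupt might look like the sketch below; the interrupt number 37 (TIMER0_INTR in sys_intr_map.h, added later in this patch), the routing to CPU0, and the empty handler body are assumptions chosen for the example.

```c
/* Usage sketch only, not part of the patch. Assumes interrupt 37 (TIMER0_INTR)
 * is routed to CPU0 and serviced as an IRQ rather than an FIQ. */
#include "gic400.h"

static void timer0_handler(int interrupt, int source)
{
    /* Clear the source at the peripheral here; the generic irq_handler()
     * writes the End Of Interrupt register after this handler returns. */
    (void)interrupt;
    (void)source;
}

void example_gic_bringup(void)
{
    gic_install_handler(37u, timer0_handler);                        /* register the C handler        */
    gic_initialise_intr(37u, 0u /* cpu */, 1u /* set */, 0u /* fiq */); /* target, configure, enable GIC */
    gic_enable_interrupt(37u);                                       /* unmask the individual source  */
}
```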
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#ifndef GLOBAL_DEFINES_H +#define GLOBAL_DEFINES_H + + +#include <stdint.h> /* standard types definitions */ + + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + <strong>IO Type Qualifiers</strong> are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + + +#endif diff --git a/software/lib/sw_lib/devices/include/qspi_flash.h b/software/lib/sw_lib/devices/include/qspi_flash.h new file mode 100644 index 0000000000000000000000000000000000000000..de06f6c771dea64a8004b744d25cfa5091c1eb88 --- /dev/null +++ b/software/lib/sw_lib/devices/include/qspi_flash.h @@ -0,0 +1,61 @@ +#include <stdint.h> + +extern void qspi_enable_cache(); +extern void qspi_xip_enable(); +extern int32_t SPI_READ_JEDIC(); +extern void spi_reset(); +extern void SET_QPI_MODE(); +extern int32_t QPI_READ_JEDIC(); + + +#define QSPI_CONFIG_BASEADDR (0x01000000UL) + +/*------------- SL AHB QSPI --------------------------------------*/ +/** @addtogroup SL AHB QSPI + @{ +*/ +typedef struct +{ + volatile uint32_t QSPI_CONTROL; + volatile uint32_t QSPI_STATUS; + volatile uint32_t QSPI_CMD; + volatile uint32_t QSPI_ADDR; + volatile uint32_t QSPI_RDATA0; + volatile uint32_t QSPI_RDATA1; + volatile uint32_t QSPI_RDATA2; + volatile uint32_t QSPI_RDATA3; + volatile uint32_t QSPI_WDATA0; + volatile uint32_t QSPI_WDATA1; + volatile uint32_t QSPI_WDATA2; + volatile uint32_t QSPI_WDATA3; +} AHB_QSPI_TypeDef; + +#define SL_AHB_QSPI ((AHB_QSPI_TypeDef *) QSPI_CONFIG_BASEADDR) + +#define QSPI_CACHE_CONFIG_ADDR (0x01001000UL) + +/*------------- Arm Flash Cache --------------------------------------*/ +/** @addtogroup Arm Flash Cache + @{ +*/ +typedef struct +{ + volatile uint32_t CCR; + volatile uint32_t SR; + volatile uint32_t IRQMASK; + volatile uint32_t IRQSTAT; + volatile uint32_t HWPARAMS; + volatile uint32_t CSHR; + volatile uint32_t CSMR; +} Arm_Flash_Cache_TypeDef; + +#define CACHE_CTRL ((Arm_Flash_Cache_TypeDef *) QSPI_CACHE_CONFIG_ADDR) + +#define QSPI_DATA_BASEADDR (0x00400000UL) + +typedef struct +{ + volatile uint32_t DATA[512]; +} QSPI_CACHE_TypeDef; + +#define QSPI_CACHE ((QSPI_CACHE_TypeDef *) QSPI_DATA_BASEADDR) diff --git a/software/lib/sw_lib/devices/include/uart_stdout.h b/software/lib/sw_lib/devices/include/uart_stdout.h new file mode 100644 index 0000000000000000000000000000000000000000..e513680e60e5f4b8974f9fd13d58f735d1e66eea --- /dev/null +++ b/software/lib/sw_lib/devices/include/uart_stdout.h @@ -0,0 +1,32 @@ +/* + *----------------------------------------------------------------------------- + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2010-2013 Arm Limited or its affiliates. 
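Editor's note, not part of the patch: qspi_flash.h above declares the SL AHB QSPI helpers implemented in qspi_flash.c later in this diff. The sketch below is illustrative only; the call order mirrors that implementation, and treating a JEDEC ID of zero as "no device present" is an assumption made for the example.

```c
/* Illustrative bring-up sequence for the SL AHB QSPI controller,
 * using only the functions declared in qspi_flash.h. */
#include <stdint.h>
#include "qspi_flash.h"

int example_qspi_bringup(void)
{
    spi_reset();                    /* issue flash software reset (0x66/0x99)      */

    int32_t id = SPI_READ_JEDIC();  /* read the JEDEC ID over plain SPI            */
    if (id == 0) {
        return -1;                  /* no device responded (assumption)            */
    }

    SET_QPI_MODE();                 /* switch flash and controller into QPI mode   */
    qspi_enable_cache();            /* enable the Arm flash cache in front of XIP  */
    qspi_xip_enable();              /* continuous-read / no-opcode execute-in-place */

    return 0;
}
```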
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. + * + * SVN Information + * + * Checked In : $Date: 2017-10-10 15:55:38 +0100 (Tue, 10 Oct 2017) $ + * + * Revision : $Revision: 371321 $ + * + * Release Information : Cortex-M System Design Kit-r1p1-00rel0 + *----------------------------------------------------------------------------- + */ +/* Functions for stdout during simulation */ +/* The functions are implemented in shared/software/common/uart_stdout.c */ +#include <stdint.h> + +extern void UartStdOutInit(void); +extern void Uart2StdOutInit(void); +extern unsigned char UartPutc(unsigned char my_ch); +extern unsigned char UartGetc(void); +extern void UartEndSimulation(void); diff --git a/software/lib/sw_lib/devices/src/gic400.c b/software/lib/sw_lib/devices/src/gic400.c new file mode 100755 index 0000000000000000000000000000000000000000..39b6fe715f1ed960e8979d6fc5c2ede0926778aa --- /dev/null +++ b/software/lib/sw_lib/devices/src/gic400.c @@ -0,0 +1,329 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#include <stdio.h> +#include "gic400.h" +#include "uart_stdout.h" +#include "system.h" +#include "sys_memory_map.h" +#ifdef AARCH_V8 +# include "ipc.h" +# include "cpu_asm_codes.h" +#else +# include "helper_functions.h" +#endif + +#define MAX_NO_OF_INTERRUPTS_IMPLEMENTED 512 +#define SPURIOUS_INTERRUPT 1023 +#define INTERRUPT_MASK 0x000003FF +#define CPUID_SHIFT 10 + + +/* DEFAULT ADDRESSES IN MEMORY MAP FOR INTERRUPT CONTROLLER */ +static interrupt_distributor * id = (interrupt_distributor *)(GIC_DISTRIBUTOR_BASE); +static cpu_interface * ci = (cpu_interface *)(GIC_CPU_INTERFACE_BASE); +static virtual_interface_ctrl *vctrl = (virtual_interface_ctrl *)(GIC_VIRTUAL_INTERFACE_CONTROL_BASE); +static virtual_cpu_interface *vci = (virtual_cpu_interface *)(GIC_VIRTUAL_CPU_INTERFACE_BASE); + + +static IntHandler IntHandlers[MAX_NO_OF_INTERRUPTS_IMPLEMENTED] __attribute__((section ("RW"))); + +/* Following function routes the interrupt to targeted CPU */ +void intr_target_cpu(unsigned int interrupt, unsigned int cpu, unsigned int set) +{ + unsigned int word, bit_shift, temp; + + /* There are 4 interrupt target registers per word */ + word = (interrupt / 4); + bit_shift = (interrupt % 4) * 8; + cpu = (1 << cpu) << bit_shift; + + temp = id->target[word]; + if (set) + { + temp |= cpu; + } + else + { + temp &= ~cpu; + } + id->target[word] = temp; + +#ifdef DEBUG + printf("intr_target_cpu: interrupt = %d addr=%x val=%x vtbw=%x\n",interrupt, &id->target[word], id->target[word], temp); +#endif + + instr_sync_barrier(); +} + +/* Following function sets the security of interrupt */ +void intr_security_set(unsigned int interrupt, unsigned int set) +{ + unsigned int word, bit_shift, temp; + + /* There are 4 interrupt target registers per word */ + word = interrupt / 32; + bit_shift = (interrupt % 32); + + /* Non-secure interrupt */ + temp = id->group[word]; + if (set) + { + temp |= (1U << bit_shift); + } + /* Secure interrupt */ + else + { + temp &= ~(1U << bit_shift); + } + + // while(id->group[word] ==0) + id->group[word] = temp; + +#ifdef DEBUG + printf("intr_security_set: interrupt = %d addr=%x val=%x vtbw=%x\n",interrupt, &id->group[word], id->group[word], temp); +#endif + + instr_sync_barrier(); +} + +/* Following function enables the interrupt */ +void gic_enable_interrupt (unsigned int i) +{ + unsigned int word; + + word = i / 32; + i %= 32; + i = 1 << i; + + // while(id->enable.set[word] ==0) + id->enable.set[word] = i; + +#ifdef DEBUG + printf("Enable_intr: addr=%x val=%x vtbw=%x\n",&id->enable.set[word],id->enable.set[word],i); +#endif +} + +/* Following function disables the interrupt */ +void gic_disable_interrupt (unsigned int i) +{ + unsigned int word; + + word = i / 32; + i %= 32; + i = 1 << i; + + // while(id->enable.clear[word]==0) + id->enable.clear[word] = i; +} + +/* Set correct interrupt configuration (level sensitive, 1-N) */ +void set_intr_conf(unsigned int int_num, unsigned int int_val) +{ + unsigned int reg_num = int_num / 16; + unsigned int bit_shift = (int_num % 16) * 2; + + // while(id->configuration[reg_num] ==0) + id->configuration[reg_num] |= (int_val << (bit_shift)); +#ifdef DEBUG + printf("set_intr_conf: interrupt = %d addr=%x val=%x vtbw=%x\n",int_num, &id->configuration[reg_num], id->configuration[reg_num], (int_val << (bit_shift)) ); +#endif + +} + +/* Following function sets the priority and priority masks */ +void 
set_intr_priority_mask(unsigned int intr_num, unsigned int intr_pri, unsigned int pri_mask) +{ + unsigned int reg_num = (intr_num / 4); + unsigned int bit_shift = (intr_num % 4) * 8; + + if (intr_pri > pri_mask) + printf("Interrupt priority should be lesser than priority mask\n"); + + // while(ci->priority_mask == 0) + ci->priority_mask = pri_mask; + + // while(id->priority[reg_num] == 0) + id->priority[reg_num] |= (intr_pri << (bit_shift)); + +#ifdef DEBUG + printf("priority mask: intr num=%d addr=%x val=%x vtbw=%x\n",intr_num, &ci->priority_mask, ci->priority_mask, pri_mask); + printf("priority mask: addr=%x val=%x vtbw=%x\n", &id->priority[reg_num], id->priority[reg_num], (intr_pri << ((bit_shift))) ); +#endif +} + +/* Following function sets the cpu interface and distributor */ +void gic_init_ci_di(unsigned int fiq_enable) +{ + if(fiq_enable){ + ci->control = 0x9; + + } else { + // while(ci->control ==0) + ci->control = 0x1; + } + + //while(id->control == 0) + id->control = 0x1; + +#ifdef DEBUG + printf("gic_init_ci_di: addr=%x val=%x addr=%x val=%x\n",&ci->control, ci->control, &id->control, id->control); +#endif + +} + +/* Following function configures the GIC, targets the interrupt to cpu and configures priority */ +void gic_initialise_intr(unsigned int interrupt, unsigned int cpu, unsigned int set, unsigned int fiq_enable) +{ + /* Install the handler for v7 code */ + unsigned int affreg; + + /* asm volatile ( */ + /* "MRC p15, 0, %x[output], c0, c0, 5 \n\t" */ + /* : [output] "=r" (affreg) */ + /* : */ + /* : */ + /* ); */ +#ifdef AARCH_V8 + affreg = get_cluster_id(); +#else + affreg = get_processor_affinity(); +#endif // ARM_V8_32BIT + + id->control = 0x00000000; + /* Interrupt target cpu */ + intr_target_cpu(interrupt, cpu, set); + + /* Interrupt (1-N model, Level sensitive) -> 0x1 */ + /* Interrupt (N-N model, edge sensitive) -> 0x2 */ + /* Refer to Interrupt configuration register GICD_ICFGRn */ + set_intr_conf(interrupt, 0x1); + + /* Set priority mask and priority */ + set_intr_priority_mask(interrupt, 0x00, 0xF0); + + /* Set security */ + intr_security_set(interrupt, 0); + + /* Enable gic distributor and CPU interface */ + gic_init_ci_di(fiq_enable); + + } + + +/* Following function installs the handler */ +void gic_install_handler(unsigned int interrupt, IntHandler handler) +{ + if (interrupt > MAX_NO_OF_INTERRUPTS_IMPLEMENTED) { + return; + } + IntHandlers[interrupt] = handler; +} + + +/* Following function calls the interrupt handler */ +void irq_handler(void) +{ + int source, interrupt, raw_interrupt; + + /* service all pending interrupts */ + while (1) + { + /* Get the highest priority interrupt */ + raw_interrupt = ci->interrupt_ack; + source = raw_interrupt >> CPUID_SHIFT; + interrupt = raw_interrupt & INTERRUPT_MASK; + + if (interrupt == SPURIOUS_INTERRUPT) { + break; + } + + /* Call the handler function */ + if (IntHandlers[interrupt]) { + IntHandlers[interrupt](interrupt, source); + } + + // data_sync_barrier(); + + /* Clear the interrupt */ + ci->end_of_interrupt = raw_interrupt; + } + +} + +/* Following function calls the virtual interrupt handler */ +void call_handler_virq(unsigned int interrupt, unsigned int source) +{ + if (IntHandlers[interrupt]) { + IntHandlers[interrupt](interrupt, source); + } +} + +/* Following function identifies the interrupt type */ +void gic_interrupt_type(unsigned int interrupt, unsigned int edge_type) { + + unsigned int tmp_config; + unsigned int config_reg; + unsigned int bit_pos; + + //check interrupt is in range + if(interrupt > 
MAX_NO_OF_INTERRUPTS_IMPLEMENTED) { + return; + } + + //calculate which GICD_ICFGRn the interrupt is in + config_reg = interrupt / 16; + + //calculate bit position in the GICD_ICFGRn register + //Add 1 as each interrupt has two bits [X:Y] + //Y - is readonly + //X - selects the edge type + bit_pos = (interrupt - (config_reg * 16)) * 2 + 1; + + tmp_config = id->configuration[config_reg]; + + tmp_config = tmp_config | (edge_type << bit_pos); + + id->configuration[config_reg] = tmp_config; + +} + +void gic_read_pids(){ + printf("Addr=%x val=%x\n",&id->pid4, id->pid4); + for (int i=0;i<4;i++){ + printf("Addr=%x val=%x\n",&id->pid[i], id->pid[i]); + } + for (int i=0;i<4;i++){ + printf("Addr=%x val=%x\n",&id->cid[i], id->cid[i]); + } +} + +void gic_set_active_interrupt(unsigned int i){ + unsigned int word; + + word = i / 32; + i %= 32; + i = 1 << i; + id->active.set[word] = i; + +} + +void gic_disable_cpu_interface(void) +{ + /* Disable the CPU Interface */ + ci->control &= ~(ENABLE_GROUP0 | ENABLE_GROUP1); +} diff --git a/software/lib/sw_lib/devices/src/qspi_flash.c b/software/lib/sw_lib/devices/src/qspi_flash.c new file mode 100644 index 0000000000000000000000000000000000000000..4ec87498ed92d4dfa5eddf3d647ac2e8e90fa0d8 --- /dev/null +++ b/software/lib/sw_lib/devices/src/qspi_flash.c @@ -0,0 +1,70 @@ +#include "qspi_flash.h" + +void spi_reset(){ + SL_AHB_QSPI->QSPI_CMD = 0x066; + SL_AHB_QSPI->QSPI_CMD = 0x166; + while(SL_AHB_QSPI->QSPI_STATUS&1){ + ; + } + SL_AHB_QSPI->QSPI_CMD = 0x099; + SL_AHB_QSPI->QSPI_CMD = 0x199; + while(SL_AHB_QSPI->QSPI_STATUS&1){ ;} +} + +void SET_QPI_MODE(){ + SL_AHB_QSPI->QSPI_CMD = 0x038; + SL_AHB_QSPI->QSPI_CMD = 0x138; + while(SL_AHB_QSPI->QSPI_STATUS&1){ ;} + int32_t tmp_QSPI_CONTROL; + tmp_QSPI_CONTROL = SL_AHB_QSPI->QSPI_CONTROL; + tmp_QSPI_CONTROL = tmp_QSPI_CONTROL | 1; + SL_AHB_QSPI->QSPI_CONTROL=tmp_QSPI_CONTROL; + return; +} + +void qspi_enable_cache(){ + uint32_t QSPI_CTRL_tmp; + QSPI_CTRL_tmp=SL_AHB_QSPI->QSPI_CONTROL; + QSPI_CTRL_tmp = QSPI_CTRL_tmp | 1<<8; + SL_AHB_QSPI->QSPI_CONTROL = QSPI_CTRL_tmp; + + CACHE_CTRL->CCR = 0x61; + while((CACHE_CTRL->SR&0x3)!=2){;} + + return; +} + +void qspi_xip_enable(){ + uint32_t QSPI_CTRL_tmp; + // Setup continuous reading + QSPI_CTRL_tmp=SL_AHB_QSPI->QSPI_CONTROL; + QSPI_CTRL_tmp = QSPI_CTRL_tmp | 1<<8; + SL_AHB_QSPI->QSPI_CONTROL = QSPI_CTRL_tmp; + + QSPI_CTRL_tmp=SL_AHB_QSPI->QSPI_CONTROL; + QSPI_CTRL_tmp = QSPI_CTRL_tmp | ((1<<24) + (0x0A0<<16)); + SL_AHB_QSPI->QSPI_CONTROL = QSPI_CTRL_tmp; + // 1 read with op code + uint32_t rdata; + rdata = QSPI_CACHE->DATA[0]; + // set no-opcode mode + QSPI_CTRL_tmp=SL_AHB_QSPI->QSPI_CONTROL; + QSPI_CTRL_tmp = QSPI_CTRL_tmp | (1<<25); + SL_AHB_QSPI->QSPI_CONTROL = QSPI_CTRL_tmp; + return; +} + +int32_t SPI_READ_JEDIC(){ + SL_AHB_QSPI->QSPI_CMD = 0x0002009F; + SL_AHB_QSPI->QSPI_CMD = 0x0002039F; + while(SL_AHB_QSPI->QSPI_STATUS&1){ ;} + return SL_AHB_QSPI->QSPI_RDATA0; +} + +int32_t QPI_READ_JEDIC(){ + SL_AHB_QSPI->QSPI_CMD = 0x000210AF; + SL_AHB_QSPI->QSPI_CMD = 0x000213AF; + while(SL_AHB_QSPI->QSPI_STATUS&1){ ;} + return SL_AHB_QSPI->QSPI_RDATA0; +} + diff --git a/software/lib/sw_lib/devices/src/uart_stdout.c b/software/lib/sw_lib/devices/src/uart_stdout.c new file mode 100644 index 0000000000000000000000000000000000000000..bee199a4fef8ebe4c00f7f36bfb16514772226ee --- /dev/null +++ b/software/lib/sw_lib/devices/src/uart_stdout.c @@ -0,0 +1,65 @@ +/* + *----------------------------------------------------------------------------- + * The confidential and proprietary 
information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Limited or its affiliates. + * + * (C) COPYRIGHT 2010-2013 Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Limited or its affiliates. + * + * SVN Information + * + * Checked In : $Date: 2017-10-10 15:55:38 +0100 (Tue, 10 Oct 2017) $ + * + * Revision : $Revision: 371321 $ + * + * Release Information : Cortex-M System Design Kit-r1p1-00rel0 + *----------------------------------------------------------------------------- + */ + + /* + + UART functions for retargetting + + */ + +#include "uart_stdout.h" +#include "CMSDK.h" +#define CLKFREQ 100000000 +#define BAUDRATE 3125000 +#define BAUDCLKDIV (CLKFREQ / BAUDRATE) + +void UartStdOutInit(void) +{ + CMSDK_UART2->CTRL = 0x00; // disable whie reprogramming + CMSDK_UART2->BAUDDIV = BAUDCLKDIV; // (100MHz/BAUDRATE) in 16.4 format + CMSDK_UART2->CTRL = 0x01; // TX, standard UART2 + return; +} + + +// Output a character +unsigned char UartPutc(unsigned char my_ch) +{ + while (CMSDK_UART2->STATE & 1); // Wait if Transmit Holding register full + CMSDK_UART2->DATA = my_ch; // write to transmit holding register + return (my_ch); +} +// Get a character +unsigned char UartGetc(void) +{ + while (((CMSDK_UART2->STATE & 2)==0)); + return (CMSDK_UART2->DATA); +} + +void UartEndSimulation(void) +{ + UartPutc((char) 0x4); // End of simulation + while(1); +} + diff --git a/software/lib/sw_lib/host/include/clus_pe_cnt.h b/software/lib/sw_lib/host/include/clus_pe_cnt.h new file mode 100644 index 0000000000000000000000000000000000000000..f51b42114f41aa510be5f44807915d1facead46d --- /dev/null +++ b/software/lib/sw_lib/host/include/clus_pe_cnt.h @@ -0,0 +1,89 @@ + +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
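Editor's note, not part of the patch: uart_stdout.c above retargets simulation stdout to CMSDK UART2. As a usage illustration only, a minimal smoke test could drive the same functions directly; the message text is arbitrary, and UartEndSimulation() is used, as described in uart_stdout.h, to terminate the simulation.

```c
/* Usage sketch only, not part of the patch. */
#include "uart_stdout.h"

void example_uart_smoke_test(void)
{
    UartStdOutInit();                          /* 100 MHz / 3.125 Mbaud per the driver defaults */

    const char *msg = "MegaSoC UART alive\n";  /* arbitrary test string                         */
    for (const char *p = msg; *p != '\0'; p++) {
        UartPutc((unsigned char)*p);
    }

    UartEndSimulation();                       /* sends EOT (0x04) and never returns            */
}
```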
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- +#ifndef __CLUS_PE_CNT__ +#define __CLUS_PE_CNT__ + +#define NUM_PE_CLUS_0 1 + +#define NUM_PE_CLUS_1 0 +#define NUM_PE_CLUS_2 0 +#define NUM_PE_CLUS_3 0 +#define NUM_PE_CLUS_4 0 +#define NUM_PE_CLUS_5 0 +#define NUM_PE_CLUS_6 0 +#define NUM_PE_CLUS_7 0 +#define NUM_PE_CLUS_8 0 +#define NUM_PE_CLUS_9 0 +#define NUM_PE_CLUS_10 0 +#define NUM_PE_CLUS_11 0 +#define NUM_PE_CLUS_12 0 +#define NUM_PE_CLUS_13 0 +#define NUM_PE_CLUS_14 0 +#define NUM_PE_CLUS_15 0 +#define NUM_PE_CLUS_16 0 +#define NUM_PE_CLUS_17 0 +#define NUM_PE_CLUS_18 0 +#define NUM_PE_CLUS_19 0 +#define NUM_PE_CLUS_20 0 +#define NUM_PE_CLUS_21 0 +#define NUM_PE_CLUS_22 0 +#define NUM_PE_CLUS_23 0 +#define NUM_PE_CLUS_24 0 +#define NUM_PE_CLUS_25 0 +#define NUM_PE_CLUS_26 0 +#define NUM_PE_CLUS_27 0 +#define NUM_PE_CLUS_28 0 +#define NUM_PE_CLUS_29 0 +#define NUM_PE_CLUS_30 0 +#define NUM_PE_CLUS_31 0 +#define NUM_PE_CLUS_32 0 +#define NUM_PE_CLUS_33 0 +#define NUM_PE_CLUS_34 0 +#define NUM_PE_CLUS_35 0 +#define NUM_PE_CLUS_36 0 +#define NUM_PE_CLUS_37 0 +#define NUM_PE_CLUS_38 0 +#define NUM_PE_CLUS_39 0 +#define NUM_PE_CLUS_40 0 +#define NUM_PE_CLUS_41 0 +#define NUM_PE_CLUS_42 0 +#define NUM_PE_CLUS_43 0 +#define NUM_PE_CLUS_44 0 +#define NUM_PE_CLUS_45 0 +#define NUM_PE_CLUS_46 0 +#define NUM_PE_CLUS_47 0 +#define NUM_PE_CLUS_48 0 +#define NUM_PE_CLUS_49 0 +#define NUM_PE_CLUS_50 0 +#define NUM_PE_CLUS_51 0 +#define NUM_PE_CLUS_52 0 +#define NUM_PE_CLUS_53 0 +#define NUM_PE_CLUS_54 0 +#define NUM_PE_CLUS_55 0 +#define NUM_PE_CLUS_56 0 +#define NUM_PE_CLUS_57 0 +#define NUM_PE_CLUS_58 0 +#define NUM_PE_CLUS_59 0 +#define NUM_PE_CLUS_60 0 +#define NUM_PE_CLUS_61 0 +#define NUM_PE_CLUS_62 0 +#define NUM_PE_CLUS_63 0 + +#define NUM_THREADS 1 + +#endif diff --git a/software/lib/sw_lib/host/include/il_mem_map_includes.h b/software/lib/sw_lib/host/include/il_mem_map_includes.h new file mode 100755 index 0000000000000000000000000000000000000000..c1f5cca0686ab47b0c6002dbf171cf586ce9cb03 --- /dev/null +++ b/software/lib/sw_lib/host/include/il_mem_map_includes.h @@ -0,0 +1,36 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#ifndef __IL_MEM_MAP_INCLUDES_H__ +#define __IL_MEM_MAP_INCLUDES_H__ + +#include <global_defines.h> + +// ************** REDEFINE TOP LEVEL defines ********************** +// TOP's TRICKBOX + +// ****************** ADD IL LEVEL defines ************************ +// IL's TRICKBOX +// CVM starting point where the CPU synchronisition structure is +#define CPUSYNC_BASE 0x02300000 +//Integration Layer Vultan flash controller APB interface base address + +// Integration layer's Trickbox + +#endif /* __IL_MEM_MAP_INCLUDES_H__ */ + + diff --git a/software/lib/sw_lib/host/include/il_sys_includes.h b/software/lib/sw_lib/host/include/il_sys_includes.h new file mode 100755 index 0000000000000000000000000000000000000000..c0c6a93e0f6dce62ccf8ad3a5ba591f90f00577a --- /dev/null +++ b/software/lib/sw_lib/host/include/il_sys_includes.h @@ -0,0 +1,25 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#ifndef __IL_SYS_INCLUDES_H__ +#define __IL_SYS_INCLUDES_H__ + +// *************** IL SYSTEM LEVEL INCLUDES to use in TOP LEVEL context ***************** + +#endif /* __IL_SYS_INCLUDES_H__ */ + + diff --git a/software/lib/sw_lib/host/include/platform.h b/software/lib/sw_lib/host/include/platform.h new file mode 100755 index 0000000000000000000000000000000000000000..4c9cb9d7aa45d86ad3f23819dbd60b5969c5c76b --- /dev/null +++ b/software/lib/sw_lib/host/include/platform.h @@ -0,0 +1,32 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#ifndef __PLATFORM__ +#define __PLATFORM__ +#include <arm_acle.h> +void sync_ext_abort_handler(void); + +void invalidate_l2_caches(void); +void disable_smp(void); +void disable_caches(void); +void enable_icache(void); +uint32_t get_sctlr_val(void); +void write_sctlr_val(uint32_t sctlr_val); +void clear_OS_LOCK(void); +#endif + + diff --git a/software/lib/sw_lib/host/include/sys_intr_map.h b/software/lib/sw_lib/host/include/sys_intr_map.h new file mode 100644 index 0000000000000000000000000000000000000000..ce61979329fded1bd0fb4ca7a96c01b80b91079c --- /dev/null +++ b/software/lib/sw_lib/host/include/sys_intr_map.h @@ -0,0 +1,29 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + +#define NUM_SHD_INTR 64//PPIs +#define CNTH_INTR 26 +#define CNTV_INTR 27 +#define CNTPS_INTR 29 +#define CNTPNS_INTR 30 +//SPIs +#define UART0_TX_INTR 32 +#define UART0_RX_INTR 33 +#define UART0_TX_OVR_INTR 34 +#define UART0_RX_OVR_INTR 35 +#define UART0_COMB_INTR 36 +#define TIMER0_INTR 37 diff --git a/software/lib/sw_lib/host/include/sys_memory_map.h b/software/lib/sw_lib/host/include/sys_memory_map.h new file mode 100644 index 0000000000000000000000000000000000000000..f9915afeb5725553cb89a3cacd6a10f9baa00466 --- /dev/null +++ b/software/lib/sw_lib/host/include/sys_memory_map.h @@ -0,0 +1,45 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
+// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- + + + +#define GIC_BASE 0x01100000UL +#define GIC_DISTRIBUTOR_BASE 0x01101000UL +#define GIC_CPU_INTERFACE_BASE 0x01102000UL +#define GIC_VIRTUAL_INTERFACE_CONTROL_BASE 0x01104000UL +#define GIC_VIRTUAL_CPU_INTERFACE_BASE 0x01106000UL + +#define PERIPHERAL_BASE 0x40000000UL +#define SYS_UART0_BASE PERIPHERAL_BASE +#define TIMER0_BASE 0x40001000UL + +#define DAP_DBG_BASE 0x60000000UL + +#define PASS_CODE 0xBEEF1AC0 +#define FAIL_CODE 0xDEADBEEF +#define HOST_CXDT_CODE 0xFFFFFFFF +#define INTR_RTR_NUM_SII 96 +#define INTR_RTR_NUM_ICI 4 +#define INTR_RTR_LDE_LEVEL 2 +#define NUM_CLUSTERS 1 +#define NUM_CORES 1 + + +// ******** Contains REDEFINES for TOP level defines to use in IL context ********** +// ************* Include Intgeration Layer includes always as last ***************** +#include "il_mem_map_includes.h" +// ********************************************************************************* diff --git a/software/lib/sw_lib/host/include/system_defines.h b/software/lib/sw_lib/host/include/system_defines.h new file mode 100755 index 0000000000000000000000000000000000000000..ee604d2c760f4827d20f5d496e8563e00b1ca591 --- /dev/null +++ b/software/lib/sw_lib/host/include/system_defines.h @@ -0,0 +1,16 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- diff --git a/software/lib/sw_lib/host/scat/bootloader.scat b/software/lib/sw_lib/host/scat/bootloader.scat new file mode 100644 index 0000000000000000000000000000000000000000..e4d75fbe759e6076735f70073fdac9bef25bfaeb --- /dev/null +++ b/software/lib/sw_lib/host/scat/bootloader.scat @@ -0,0 +1,28 @@ + + +LoadRegion 0x00000000 +{ + SEC_ROM_BOOT_IMG 0x0000000 + { + boot.s.v8-a.o (SECURE_ROM_BOOT, +FIRST) + *(+RO) + } + +} + +SRAM 0x00800000 +{ + ARM_LIB_STACK 0x00800000 EMPTY 0x2000 + { + } + + ARM_LIB_HEAP 0x00802000 EMPTY 0x2000 + { + + } + + RWZI 0x00804000 0x2000 + { + *(+RW,+ZI) + } +} diff --git a/software/lib/sw_lib/host/scat/default.ld b/software/lib/sw_lib/host/scat/default.ld new file mode 100755 index 0000000000000000000000000000000000000000..80f04dfc3a13c11e89a006b11fd16aba92ce5d8e --- /dev/null +++ b/software/lib/sw_lib/host/scat/default.ld @@ -0,0 +1,150 @@ +/*------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. 
+// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. +// +// Release Information : SSE710-r0p0-00rel0 +// +// ----------------------------------------------------------------------------- +*/ + +MEMORY +{ + SSE710_SEC_ROM (rx) : ORIGIN = 0x00000000, LENGTH = 16K + SSE710_SEC_SRAM (rwx) : ORIGIN = 0x00004000, LENGTH = 4096K +} + +GROUP(libgcc.a libc.a libm.a libnosys.a) + +SECTIONS +{ + .SEC_ROM_BOOT_IMG : + { + *boot.s.v8-a.o(SECURE_ROM_BOOT) + FILL(0xDEADDEAD); + . = ORIGIN(SSE710_SEC_ROM) + LENGTH(SSE710_SEC_ROM); + } > SSE710_SEC_ROM + + .LOAD_MAIN : + { + *(.text*) + + KEEP(*(.init)) + KEEP(*(.fini)) + + /* .ctors */ + *crtbegin.o(.ctors) + *crtbegin?.o(.ctors) + *(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors) + *(SORT(.ctors.*)) + *(.ctors) + + /* .dtors */ + *crtbegin.o(.dtors) + *crtbegin?.o(.dtors) + *(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors) + *(SORT(.dtors.*)) + *(.dtors) + + *(.rodata*) + + KEEP(*(.eh_frame*)) + + __data_start__ = .; + *(.data*) + + . = ALIGN(4); + /* preinit data */ + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP(*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + + . = ALIGN(4); + /* init data */ + PROVIDE_HIDDEN (__init_array_start = .); + KEEP(*(SORT(.init_array.*))) + KEEP(*(.init_array)) + PROVIDE_HIDDEN (__init_array_end = .); + + + . = ALIGN(4); + /* finit data */ + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP(*(SORT(.fini_array.*))) + KEEP(*(.fini_array)) + PROVIDE_HIDDEN (__fini_array_end = .); + + KEEP(*(.jcr*)) + . = ALIGN(4); + /* All data end */ + __data_end__ = .; + + } > SSE710_SEC_SRAM + + .ARM.extab : + { + *(.ARM.extab* .gnu.linkonce.armextab.*) + } > SSE710_SEC_SRAM + + __exidx_start = .; + .ARM.exidx : + { + *(.ARM.exidx* .gnu.linkonce.armexidx.*) + } > SSE710_SEC_SRAM + __exidx_end = .; + + .bss : + { + . = ALIGN(4); + __bss_start__ = .; + *(.bss*) + *(COMMON) + . = ALIGN(4); + __bss_end__ = .; + } > SSE710_SEC_SRAM + + .PAGE_TABLE_SECTIONS 0x02100000 : + { + *page_table.s.v8-a.o(*) + } > SSE710_SEC_SRAM + + /* Notes section + * This is not used so we discard it. Although not used it needs to be + * explicitly mentioned in the linker script as some toolchains will place + * the notes section at address 0 if it is not explicitly mentioned. */ + /DISCARD/ : { *(.note*) } + + .heap 0x02310000 (COPY): + { + __end__ = .; + end = __end__; + *(.heap*) + __HeapLimit = .; + } > SSE710_SEC_SRAM + + /* .stack_dummy section doesn't contains any symbols. 
It is only + * used for linker to calculate size of stack sections, and assign + * values to stack symbols later */ + .stack_dummy 0x023c0000 (COPY): + { + *(.stack*) + } > SSE710_SEC_SRAM + + /* Set stack top to end of RAM, and stack limit move down by + * size of stack_dummy section */ + __StackTop = ORIGIN(SSE710_SEC_SRAM) + LENGTH(SSE710_SEC_SRAM); + __StackLimit = __StackTop - SIZEOF(.stack_dummy); + PROVIDE(__stack = __StackTop); + + /* Check if data + heap + stack exceeds RAM limit */ + ASSERT(__StackLimit >= __HeapLimit, "region RAM overflowed with stack") +} + diff --git a/software/lib/sw_lib/host/scat/default.scat b/software/lib/sw_lib/host/scat/default.scat new file mode 100755 index 0000000000000000000000000000000000000000..7634ee5d7692c0c3ef65cbab83a2f973cb117c9f --- /dev/null +++ b/software/lib/sw_lib/host/scat/default.scat @@ -0,0 +1,43 @@ +;;------------------------------------------------------------------------------ +;; The confidential and proprietary information contained in this file may +;; only be used by a person authorised under and to the extent permitted +;; by a subsisting licensing agreement from Arm Limited or its affiliates. +;; +;; (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +;; ALL RIGHTS RESERVED +;; +;; This entire notice must be reproduced on all copies of this file +;; and copies of this file may only be made by a person if such person is +;; permitted to do so under the terms of a subsisting license agreement +;; from Arm Limited or its affiliates. +;; +;; Release Information : SSE710-r0p0-00rel0 +;; +;;------------------------------------------------------------------------------ + +ROM 0x00400000 +{ + EXEC_ROM 0x00400000 0x1000000 + { + boot.s.v8-a.o (SECURE_ROM_BOOT, +FIRST) + *(+RO) + } +} + +SRAM 0x00800000 +{ + RWZI 0x00800000 0x4000 + { + *(+RW,+ZI) + } + + ARM_LIB_STACK 0x00804000 EMPTY 0x2000 + { + } + + ARM_LIB_HEAP 0x00806000 EMPTY 0x2000 + { + } + +} + diff --git a/software/lib/sw_lib/host/src/sys_utils.c b/software/lib/sw_lib/host/src/sys_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..39fe34f2f542ce102a2a6a9b50ab75a1d1a1f372 --- /dev/null +++ b/software/lib/sw_lib/host/src/sys_utils.c @@ -0,0 +1,114 @@ +//------------------------------------------------------------------------------ +// The confidential and proprietary information contained in this file may +// only be used by a person authorised under and to the extent permitted +// by a subsisting licensing agreement from Arm Limited or its affiliates. +// +// (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +// ALL RIGHTS RESERVED +// +// This entire notice must be reproduced on all copies of this file +// and copies of this file may only be made by a person if such person is +// permitted to do so under the terms of a subsisting license agreement +// from Arm Limited or its affiliates. 
+//
+// Release Information : SSE710-r0p0-00rel0
+//
+// -----------------------------------------------------------------------------
+
+#include <stdio.h>
+#include <stdarg.h>
+#include "system.h"
+#include "sys_memory_map.h"
+#include "sys_intr_map.h"
+#include "host_chassis_control.h"
+#include "uart_stdout.h"
+#include "cpu_asm_codes.h"
+// ************* Include Integration Layer includes always as last *****************
+#include "il_sys_includes.h"
+// *********************************************************************************
+
+//Integration layer init function
+// __attribute__((weak)) void il_init(void){
+// HOST_CHASSIS_CTRL_TypeDef *host_cpu_chassis_control = ((HOST_CHASSIS_CTRL_TypeDef *) HOST_CPU_HOST_BASE_CONTROL_BASE);
+// printf("Extsys0 and Extsys1 CPUWAIT are de-asserted.\n");
+// host_cpu_chassis_control->EXT_SYS0_RST_CTRL.B.CPUWAIT = 0;
+// host_cpu_chassis_control->EXT_SYS1_RST_CTRL.B.CPUWAIT = 0;
+// }
+
+// __attribute__((weak)) int main_ext(){
+// call_wfi();
+// return 0;
+// }
+
+
+/**-----------------------------------------------------------------------------
+ Function name : cpu_test_hook
+ Input Parameters : void
+ Return Type : void
+ Description : 1. Hook function to be called from the cpu_test function
+ 2. __weak definition as overridden by test
+------------------------------------------------------------------------------*/
+// __attribute__((weak)) void cpu_test_hook(void) {
+// }
+
+
+// @override
+// Override weak Moonshine function in apps_v8_32/src/cpu_asm_codes.c,
+// to not print unwanted characters in the log with slave CPUs
+/**-----------------------------------------------------------------------------
+ Function name : cpu_test
+ Input Parameters : void
+ Return Type : void
+ Description : 1. Entry point for all slave CPUs
+ 2. __weak definition as overridden by test
+ 3. In the absence of a Test implementation, avoids data corruption and prints &
+------------------------------------------------------------------------------*/
+// void cpu_test(void)
+// {
+// HOST_CHASSIS_CTRL_TypeDef *host_cpu_chassis_control = ((HOST_CHASSIS_CTRL_TypeDef *) HOST_CPU_HOST_BASE_CONTROL_BASE);
+
+// unsigned int cpu_num;
+// cpu_num = get_cpu_core_number();
+
+// cpu_test_hook();
+// while(1) {
+// call_wfe();
+// }
+// }
+
+
+
+// extern int main();
+
+// void master_cpu_entry(void){
+// // Clear wakeup, so CORE0 can turn off if needed
+// HOST_CHASSIS_CTRL_TypeDef *host_cpu_chassis_control = ((HOST_CHASSIS_CTRL_TypeDef *) HOST_CPU_HOST_BASE_CONTROL_BASE);
+// host_cpu_chassis_control->HOST_CPU_WAKEUP.B.CORE0_WAKEUP = 0x0;
+
+// main();
+// call_wfi();
+// }
+
+
+/**-----------------------------------------------------------------------------
+ Function name : TEST_PASS
+ Input Parameters : void
+ Return Type : void
+ Description : Test pass function
+ ------------------------------------------------------------------------------*/
+
+void TEST_PASS(void) {
+ call_wfi();
+}
+
+/**-----------------------------------------------------------------------------
+ Function name : TEST_FAIL
+ Input Parameters : void
+ Return Type : void
+ Description : Test fail function
+ ------------------------------------------------------------------------------*/
+
+void TEST_FAIL(void) {
+ call_wfi();
+}
+
diff --git a/software/shared/bin/cxdt.bin b/software/shared/bin/cxdt.bin
new file mode 120000
index 0000000000000000000000000000000000000000..1f9d0462b8cc1baaadfeefbfad35c08ea4fad1e5
--- /dev/null
+++ b/software/shared/bin/cxdt.bin
@@ -0,0 +1 @@
+/home/dwn1c21/SoC-Labs/megasoc_project/megasoc_tech/software/shared/bin/cxdt_empty.bin
\ No newline at end of file
diff --git a/software/shared/bin/cxdt_empty.bin b/software/shared/bin/cxdt_empty.bin
new file mode 100755
index 0000000000000000000000000000000000000000..0b63655ec2098d092a67e7e69c0085b3527c4980
Binary files /dev/null and b/software/shared/bin/cxdt_empty.bin differ
diff --git a/software/shared/tools/bin/memory_image_scripts/cg071_elf2hex b/software/shared/tools/bin/memory_image_scripts/cg071_elf2hex
new file mode 100755
index 0000000000000000000000000000000000000000..06f0e85d46acffa4a2d6b5ea9dfb3c1e67754eb1
Binary files /dev/null and b/software/shared/tools/bin/memory_image_scripts/cg071_elf2hex differ
diff --git a/software/shared/tools/bin/memory_image_scripts/cg071_elf2mem b/software/shared/tools/bin/memory_image_scripts/cg071_elf2mem
new file mode 100755
index 0000000000000000000000000000000000000000..169d8b6ee389020f7a9fcd22245e1ff1de605184
Binary files /dev/null and b/software/shared/tools/bin/memory_image_scripts/cg071_elf2mem differ
diff --git a/software/shared/tools/bin/memory_image_scripts/cg071_gcc_hex_convert.pl b/software/shared/tools/bin/memory_image_scripts/cg071_gcc_hex_convert.pl
new file mode 100755
index 0000000000000000000000000000000000000000..25272ed31c7778d0ee64fd70a7611809e8b44d6a
--- /dev/null
+++ b/software/shared/tools/bin/memory_image_scripts/cg071_gcc_hex_convert.pl
@@ -0,0 +1,56 @@
+#!/usr/bin/env perl
+#######################################################################################################################
+#
+# The confidential and proprietary information contained in this file may
+# only be used by a person authorised under and to the extent permitted
+# by a subsisting licensing agreement from Arm Limited or its affiliates.
+#
+# (C) COPYRIGHT 2011-2021 Arm Limited or its affiliates.
+# ALL RIGHTS RESERVED +# +# This entire notice must be reproduced on all copies of this file +# and copies of this file may only be made by a person if such person is +# permitted to do so under the terms of a subsisting license agreement +# from Arm Limited or its affiliates. +# +####################################################################################################################### + +#use strict; +#use warnings; +use Getopt::Long; + +my $input_file = "test_s.hex"; +my $output_file = "test_s.hex"; +my $add_offset = "0"; +my $sub_offset = "10000000"; + + # Parse our command-line arguments + GetOptions ("in=s" => \$input_file, + "out=s" => \$output_file, + "add-offset=s" => \$add_offset, + "sub-offset=s" => \$sub_offset) + or die("Error in command line arguments\n"); + print "Input: $input_file, Output: $output_file\n"; + +my $input_lines; +my $output_lines; + + # Slurp our file + open(INFILE, $input_file) or die "Error opening input file $input_file: $!\n"; + {local $/; undef $/; $input_lines = <INFILE>;} + close INFILE; + + # Generate 32 bits/line and re-align addresses + $output_lines = $input_lines; +undef $input_lines; + $output_lines =~ s|(\S\S) (\S\S) (\S\S) (\S\S)\s*|$4$3$2$1\n|g; + $output_lines =~ s|\n+|\n|g; + $output_lines =~ s|\@(\S+)|"\@".sprintf("%X", (hex($1)-hex($sub_offset)+hex($add_offset))/4)|ge; + + # Write contents to target file + open(my $OUTFILE, '>', $output_file) or die "Error opening output file $output_file: $!\n"; + print $OUTFILE $output_lines; + close $OUTFILE; + +__END__ + diff --git a/software/src/bootloader/bootloader.c b/software/src/bootloader/bootloader.c new file mode 100644 index 0000000000000000000000000000000000000000..7f4c0add1b27ba7d7188a69eb7c749bd8ea16d74 --- /dev/null +++ b/software/src/bootloader/bootloader.c @@ -0,0 +1,27 @@ +//#include "host_chassis_control.h" +//#include "system_level_functions.h" +//#include "intrinsics.h" + +#include "system.h" +#include "qspi_flash.h" +#include "cpu_asm_codes.h" +#include "uart_stdout.h" +#include <stdio.h> +int main(void) { + uint32_t errors = 0; + UartStdOutInit(); + + printf("\n***SoCLabs MegaSoC***\n"); + enable_caches(); + enable_caches_el1(); + //spi_reset(); + //int32_t rID = SPI_READ_JEDIC(); + //SET_QPI_MODE(); + //rID = QPI_READ_JEDIC(); + //qspi_enable_cache(); + //qspi_xip_enable(); + printf("***Flash Enabled...Booting***\n\n\n"); + + void (*main_code)(void) = (void (*)())0x00400000; + main_code(); +} \ No newline at end of file diff --git a/software/src/bootloader/makefile b/software/src/bootloader/makefile new file mode 100644 index 0000000000000000000000000000000000000000..cb8fcaec23eff581e77f2bf591cbfc3f5b6d48b9 --- /dev/null +++ b/software/src/bootloader/makefile @@ -0,0 +1,65 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. 
+## +## Release Information : SSE710-r0p0-00rel0 +## +##------------------------------------------------------------------------------ +## Purpose : C test Makefile +##------------------------------------------------------------------------------ + +TEST_PATH := $(CURDIR) +TEST_ID := $(notdir $(TEST_PATH)) + +export TEST_PATH +export TEST_ID + +############################################################################# +#### Variables globally applied for all images ###### +############################################################################# + +ifeq ($(TEST_VERBOSITY), 1) + GLOBAL_TESTDEFINES := -DTEST_VERBOSITY_HIGH +endif + +############################################################################# +############# Variables necessary for compiling image for Cortex A53 ######## +############################################################################# + +APP_TESTDEFINES := + +APP_SCAT := bootloader +APP_LD_FLAGS := +APP_AS_FLAGS := -DMASTER_CPU_REF=0x0 -DMASTER_CLUS_REF=0x0 #-DTTBR0_BASE=0x02100000 -DVBAR_ADDR=0x02000000 +APP_AS_INC_DIR := +APP_C_FLAGS := $(APP_TESTDEFINES) $(GLOBAL_TESTDEFINES) #-DTTBR0_BASE=0x02100000 +APP_C_INC_DIR := $(TEST_PATH) +APP_SRC := bootloader.c +BOOT_CODE := 1 +export BOOT_CODE +export APP_SRC +export APP_C_FLAGS +export APP_C_INC_DIR +export APP_AS_FLAGS +export APP_AS_INC_DIR +export APP_LD_FLAGS +export APP_SCAT + + +############################################################################### +############################################################################### +############################################################################### + +include $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.sim + + + diff --git a/software/src/dap_tests/dap_tests.c b/software/src/dap_tests/dap_tests.c new file mode 100644 index 0000000000000000000000000000000000000000..9c10c92186b8d5e12b9067d70cee75bae90e51cb --- /dev/null +++ b/software/src/dap_tests/dap_tests.c @@ -0,0 +1,53 @@ +#include "uart_stdout.h" +#include <stdio.h> +#include "system.h" +#include "sys_memory_map.h" + +#define HW32_REG(ADDRESS) (*((volatile unsigned long *)(ADDRESS))) +#define HW8_REG(ADDRESS) (*((volatile unsigned char *)(ADDRESS))) + +int dap_id_check(void); + +int main(void) { + uint32_t errors = 0; + UartStdOutInit(); + + printf("DAP debug port tests - SoCLabs MegaSoC\n"); + + if(dap_id_check()!=0){ + printf("DAP not present\n"); + printf ("\n** TEST SKIPPED **\n"); + UartEndSimulation(); + } + + UartEndSimulation(); +} + +/* --------------------------------------------------------------- */ +/* Peripheral detection */ +/* --------------------------------------------------------------- */ +/* Detect the part number to see if device is present */ +int dap_id_check(void) +{ + unsigned char PIDR0, PIDR1, PIDR2, PIDR3, PIDR4, PIDR5, PIDR6, PIDR7; + PIDR4=HW8_REG(DAP_DBG_BASE+0x0FD0); + PIDR5=HW8_REG(DAP_DBG_BASE+0x0FD4); + PIDR6=HW8_REG(DAP_DBG_BASE+0x0FD8); + PIDR7=HW8_REG(DAP_DBG_BASE+0x0FDC); + PIDR0=HW8_REG(DAP_DBG_BASE+0x0FE0); + PIDR1=HW8_REG(DAP_DBG_BASE+0x0FE4); + PIDR2=HW8_REG(DAP_DBG_BASE+0x0FE8); + PIDR3=HW8_REG(DAP_DBG_BASE+0x0FEC); + printf("PIDR = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", PIDR0,PIDR1,PIDR2,PIDR3,PIDR4,PIDR5,PIDR6,PIDR7); + if ((PIDR4 != 0x04) || + (PIDR5 != 0x00) || + (PIDR6 != 0x00) || + (PIDR7 != 0x00) || + (PIDR0 != 0xA1) || + (PIDR1 != 0xB4) || + (PIDR2 != 0x4B) || + (PIDR3 != 0x00)) + return 1; /* part ID & ARM ID does not match */ + else + return 0; +} diff --git a/software/src/dap_tests/makefile 
b/software/src/dap_tests/makefile new file mode 100644 index 0000000000000000000000000000000000000000..8f10b4ad7f3586884ba6264c79c6953a5fdf7508 --- /dev/null +++ b/software/src/dap_tests/makefile @@ -0,0 +1,63 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. +## +## Release Information : SSE710-r0p0-00rel0 +## +##------------------------------------------------------------------------------ +## Purpose : C test Makefile +##------------------------------------------------------------------------------ + +TEST_PATH := $(CURDIR) +TEST_ID := $(notdir $(TEST_PATH)) + +export TEST_PATH +export TEST_ID + +############################################################################# +#### Variables globally applied for all images ###### +############################################################################# + +ifeq ($(TEST_VERBOSITY), 1) + GLOBAL_TESTDEFINES := -DTEST_VERBOSITY_HIGH +endif + +############################################################################# +############# Variables necessary for compiling image for Cortex A53 ######## +############################################################################# + +APP_TESTDEFINES := + +APP_SCAT := default +APP_LD_FLAGS := +APP_AS_FLAGS := -DMASTER_CPU_REF=0x0 -DMASTER_CLUS_REF=0x0 -DTTBR0_BASE=0x02100000 -DVBAR_ADDR=0x02000000 +APP_AS_INC_DIR := +APP_C_FLAGS := $(APP_TESTDEFINES) $(GLOBAL_TESTDEFINES) -DTTBR0_BASE=0x02100000 +APP_C_INC_DIR := $(TEST_PATH) +APP_SRC := dap_tests.c + +export APP_SRC +export APP_C_FLAGS +export APP_C_INC_DIR +export APP_AS_FLAGS +export APP_AS_INC_DIR +export APP_LD_FLAGS +export APP_SCAT + +############################################################################### +############################################################################### +############################################################################### + +include $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.sim + + + diff --git a/software/src/gic_tests/gic_tests.c b/software/src/gic_tests/gic_tests.c new file mode 100644 index 0000000000000000000000000000000000000000..29fd1f80f9f6226b84a4b4ef79b8f3479bab1703 --- /dev/null +++ b/software/src/gic_tests/gic_tests.c @@ -0,0 +1,147 @@ +#include "uart_stdout.h" +#include "sys_memory_map.h" +#include "sys_intr_map.h" +#include <stdio.h> +#include "gic400.h" +#include "CMSDK.h" +#include "irq.h" + + +int timer0_id_check(void); +int timer_interrupt_test_1(CMSDK_TIMER_TypeDef *CMSDK_TIMER); +static void timer_interrupt(int num, int src); + +/* peripheral and component ID values */ +#define APB_TIMER_PID4 0x04 +#define APB_TIMER_PID5 0x00 +#define APB_TIMER_PID6 0x00 +#define APB_TIMER_PID7 0x00 +#define APB_TIMER_PID0 0x22 +#define APB_TIMER_PID1 0xB8 +#define APB_TIMER_PID2 0x1B +#define APB_TIMER_PID3 0x00 +#define APB_TIMER_CID0 0x0D +#define APB_TIMER_CID1 0xF0 +#define APB_TIMER_CID2 0x05 +#define APB_TIMER_CID3 0xB1 +#define HW32_REG(ADDRESS) (*((volatile unsigned 
long *)(ADDRESS)))
+#define HW8_REG(ADDRESS) (*((volatile unsigned char *)(ADDRESS)))
+
+/* Global variables */
+volatile int timer0_irq_occurred;
+volatile int timer1_irq_occurred;
+volatile int timer0_irq_expected;
+volatile int timer1_irq_expected;
+volatile int counter;
+
+
+int main(void) {
+ uint32_t errors = 0;
+ UartStdOutInit();
+
+ printf("GIC tests - SoCLabs MegaSoC\n");
+
+ if(timer0_id_check()!=0){
+ printf("Timer 0 not present, skipping test\n");
+ printf ("\n** TEST SKIPPED **\n");
+ UartEndSimulation();
+ }
+ // Timer present - continue
+ errors += timer_interrupt_test_1(CMSDK_TIMER0);
+
+ UartEndSimulation();
+}
+
+
+/* --------------------------------------------------------------- */
+/* Peripheral detection */
+/* --------------------------------------------------------------- */
+/* Detect the part number to see if device is present */
+int timer0_id_check(void)
+{
+ uint32_t timer_id;
+ uint32_t ID0, ID1;
+ uint32_t timer_ctrl;
+ timer_ctrl = CMSDK_TIMER0->CTRL;
+ ID0=CMSDK_TIMER0->PID0 & 0xFF;
+ ID1=CMSDK_TIMER0->PID1 & 0xFF;
+ timer_id = CMSDK_TIMER0->PID2 & 0x07;
+ if ((ID0 != 0x22) ||
+ (ID1 != 0xB8) ||
+ (timer_id != 0x03))
+ return 1; /* part ID & ARM ID does not match */
+ else
+ return 0;
+}
+
+/* --------------------------------------------------------------- */
+/* Timer interrupt test 1 */
+/* --------------------------------------------------------------- */
+/*
+ Interrupt enable:
+ Timer is enabled, with the reload value set to 0x1FF (512 cycles),
+ and the timer interrupt is enabled.
+ Check that the timer interrupt has taken place at least twice
+ while counter (a software variable) is increased from 0 to 0x300.
+ If counter reaches 0x300 before two timer interrupts have been received
+ (timerx_irq_occurred < 2), flag it as a timeout error.
+
+ Interrupt disable:
+ Timer is enabled, with the reload value set to 0x1F (32 cycles),
+ and the timer interrupt is disabled.
+ The counter (a software variable) is increased from 0 to 0x100.
+ Check that the timer interrupt did not take place
+ (timer0_irq_occurred and timer1_irq_occurred are 0).
+ +*/ +int timer_interrupt_test_1(CMSDK_TIMER_TypeDef *CMSDK_TIMER){ + int return_val=0; + int err_code=0; + + puts ("Timer interrupt test"); + puts ("- Test interrupt generation enabled."); + CMSDK_TIMER->VALUE = 0; /* Disable timer */ + + gic_initialise_intr(TIMER0_INTR,0,1,0); + gic_install_handler(TIMER0_INTR, &timer_interrupt); + gic_enable_interrupt(TIMER0_INTR); + timer0_irq_expected = 1; + timer1_irq_expected = 0; + timer0_irq_occurred = 0; + timer1_irq_occurred = 0; + + + enable_irq(); + + CMSDK_TIMER->RELOAD = 0x01FF; + CMSDK_TIMER->VALUE = 0x01FF; + CMSDK_TIMER->CTRL = 0x0009; /* Timer enabled */ + counter = 0; + while (( timer0_irq_occurred < 2) && (counter < 0x300)){ + counter ++; + }; + CMSDK_TIMER->CTRL = 0x0000; /* Stop Timer */ + /* Check timeout has not occurred */ + if (counter >= 0x300) { + printf("ERROR : Timer interrupt enable fail.\n"); + err_code += (1<<0); + } + counter = 0; + + disable_irq(); + gic_disable_interrupt(TIMER0_INTR); + if (err_code != 0) { + printf ("ERROR : Interrupt test failed (0x%x)\n", err_code); + return_val=1; + err_code = 0; + } + + return(return_val); + +} + +void timer_interrupt(int num, int src){ + timer0_irq_occurred++; + CMSDK_TIMER0->INTCLEAR=1; + return; +} \ No newline at end of file diff --git a/software/src/gic_tests/makefile b/software/src/gic_tests/makefile new file mode 100644 index 0000000000000000000000000000000000000000..43d348f4467dfe7904699becc0df38316c7868b8 --- /dev/null +++ b/software/src/gic_tests/makefile @@ -0,0 +1,63 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. 
+## +## Release Information : SSE710-r0p0-00rel0 +## +##------------------------------------------------------------------------------ +## Purpose : C test Makefile +##------------------------------------------------------------------------------ + +TEST_PATH := $(CURDIR) +TEST_ID := $(notdir $(TEST_PATH)) + +export TEST_PATH +export TEST_ID + +############################################################################# +#### Variables globally applied for all images ###### +############################################################################# + +ifeq ($(TEST_VERBOSITY), 1) + GLOBAL_TESTDEFINES := -DTEST_VERBOSITY_HIGH +endif + +############################################################################# +############# Variables necessary for compiling image for Cortex A53 ######## +############################################################################# + +APP_TESTDEFINES := + +APP_SCAT := default +APP_LD_FLAGS := +APP_AS_FLAGS := -DMASTER_CPU_REF=0x0 -DMASTER_CLUS_REF=0x0 -DTTBR0_BASE=0x02100000 -DVBAR_ADDR=0x02000000 +APP_AS_INC_DIR := +APP_C_FLAGS := $(APP_TESTDEFINES) $(GLOBAL_TESTDEFINES) -DTTBR0_BASE=0x02100000 +APP_C_INC_DIR := $(TEST_PATH) +APP_SRC := gic_tests.c + +export APP_SRC +export APP_C_FLAGS +export APP_C_INC_DIR +export APP_AS_FLAGS +export APP_AS_INC_DIR +export APP_LD_FLAGS +export APP_SCAT + +############################################################################### +############################################################################### +############################################################################### + +include $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.sim + + + diff --git a/software/src/hello_world/hello_world.c b/software/src/hello_world/hello_world.c new file mode 100644 index 0000000000000000000000000000000000000000..6282327054bdab057bfd9e6349a5c7938f05c62b --- /dev/null +++ b/software/src/hello_world/hello_world.c @@ -0,0 +1,10 @@ +#include "uart_stdout.h" +#include <stdio.h> + +int main(void) { + uint32_t errors = 0; + UartStdOutInit(); + + printf("Hello SoCLabs MegaSoC\n"); + UartEndSimulation(); +} \ No newline at end of file diff --git a/software/src/hello_world/makefile b/software/src/hello_world/makefile new file mode 100644 index 0000000000000000000000000000000000000000..743b082dae12de7ac859b7e491ee059aef44876e --- /dev/null +++ b/software/src/hello_world/makefile @@ -0,0 +1,63 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. 
+## +## Release Information : SSE710-r0p0-00rel0 +## +##------------------------------------------------------------------------------ +## Purpose : C test Makefile +##------------------------------------------------------------------------------ + +TEST_PATH := $(CURDIR) +TEST_ID := $(notdir $(TEST_PATH)) + +export TEST_PATH +export TEST_ID + +############################################################################# +#### Variables globally applied for all images ###### +############################################################################# + +ifeq ($(TEST_VERBOSITY), 1) + GLOBAL_TESTDEFINES := -DTEST_VERBOSITY_HIGH +endif + +############################################################################# +############# Variables necessary for compiling image for Cortex A53 ######## +############################################################################# + +APP_TESTDEFINES := + +APP_SCAT := default +APP_LD_FLAGS := +APP_AS_FLAGS := -DMASTER_CPU_REF=0x0 -DMASTER_CLUS_REF=0x0 -DTTBR0_BASE=0x02100000 -DVBAR_ADDR=0x02000000 +APP_AS_INC_DIR := +APP_C_FLAGS := $(APP_TESTDEFINES) $(GLOBAL_TESTDEFINES) -DTTBR0_BASE=0x02100000 +APP_C_INC_DIR := $(TEST_PATH) +APP_SRC := hello_world.c + +export APP_SRC +export APP_C_FLAGS +export APP_C_INC_DIR +export APP_AS_FLAGS +export APP_AS_INC_DIR +export APP_LD_FLAGS +export APP_SCAT + +############################################################################### +############################################################################### +############################################################################### + +include $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.sim + + + diff --git a/software/src/mem_tests/makefile b/software/src/mem_tests/makefile new file mode 100644 index 0000000000000000000000000000000000000000..00c4dd8c8830b85098de6f00430e2f0a36b00012 --- /dev/null +++ b/software/src/mem_tests/makefile @@ -0,0 +1,63 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. 
+## +## Release Information : SSE710-r0p0-00rel0 +## +##------------------------------------------------------------------------------ +## Purpose : C test Makefile +##------------------------------------------------------------------------------ + +TEST_PATH := $(CURDIR) +TEST_ID := $(notdir $(TEST_PATH)) + +export TEST_PATH +export TEST_ID + +############################################################################# +#### Variables globally applied for all images ###### +############################################################################# + +ifeq ($(TEST_VERBOSITY), 1) + GLOBAL_TESTDEFINES := -DTEST_VERBOSITY_HIGH +endif + +############################################################################# +############# Variables necessary for compiling image for Cortex A53 ######## +############################################################################# + +APP_TESTDEFINES := + +APP_SCAT := default +APP_LD_FLAGS := +APP_AS_FLAGS := -DMASTER_CPU_REF=0x0 -DMASTER_CLUS_REF=0x0 -DTTBR0_BASE=0x02100000 -DVBAR_ADDR=0x02000000 +APP_AS_INC_DIR := +APP_C_FLAGS := $(APP_TESTDEFINES) $(GLOBAL_TESTDEFINES) -DTTBR0_BASE=0x02100000 +APP_C_INC_DIR := $(TEST_PATH) +APP_SRC := mem_tests.c + +export APP_SRC +export APP_C_FLAGS +export APP_C_INC_DIR +export APP_AS_FLAGS +export APP_AS_INC_DIR +export APP_LD_FLAGS +export APP_SCAT + +############################################################################### +############################################################################### +############################################################################### + +include $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.sim + + + diff --git a/software/src/mem_tests/mem_tests.c b/software/src/mem_tests/mem_tests.c new file mode 100644 index 0000000000000000000000000000000000000000..4d9a8db277ff9a0823572c3c04d56f1579f27f33 --- /dev/null +++ b/software/src/mem_tests/mem_tests.c @@ -0,0 +1,19 @@ +#include "uart_stdout.h" +#include <stdio.h> +#include "system.h" + +int main(void) { + uint32_t errors = 0; + UartStdOutInit(); + + printf("Mem Tests - SoCLabs MegaSoC\n"); + + errors += access_addr_wdata(0x00806000,16,0xCAFECAFE); + + if(errors!=0){ + printf("Test Failed\n"); + } else { + printf("Test Passed\n"); + } + UartEndSimulation(); +} \ No newline at end of file diff --git a/software/src/neon_tests/makefile b/software/src/neon_tests/makefile new file mode 100644 index 0000000000000000000000000000000000000000..a26f0545c1f3e123c16d6ad9b66b0370088ce84a --- /dev/null +++ b/software/src/neon_tests/makefile @@ -0,0 +1,63 @@ +##------------------------------------------------------------------------------ +## The confidential and proprietary information contained in this file may +## only be used by a person authorised under and to the extent permitted +## by a subsisting licensing agreement from Arm Limited or its affiliates. +## +## (C) COPYRIGHT 2018-2021 Arm Limited or its affiliates. +## ALL RIGHTS RESERVED +## +## This entire notice must be reproduced on all copies of this file +## and copies of this file may only be made by a person if such person is +## permitted to do so under the terms of a subsisting license agreement +## from Arm Limited or its affiliates. 
+##
+## Release Information : SSE710-r0p0-00rel0
+##
+##------------------------------------------------------------------------------
+## Purpose : C test Makefile
+##------------------------------------------------------------------------------
+
+TEST_PATH := $(CURDIR)
+TEST_ID := $(notdir $(TEST_PATH))
+
+export TEST_PATH
+export TEST_ID
+
+#############################################################################
+#### Variables globally applied for all images ######
+#############################################################################
+
+ifeq ($(TEST_VERBOSITY), 1)
+ GLOBAL_TESTDEFINES := -DTEST_VERBOSITY_HIGH
+endif
+
+#############################################################################
+############# Variables necessary for compiling image for Cortex A53 ########
+#############################################################################
+
+APP_TESTDEFINES :=
+
+APP_SCAT := default
+APP_LD_FLAGS :=
+APP_AS_FLAGS := -DMASTER_CPU_REF=0x0 -DMASTER_CLUS_REF=0x0 -DTTBR0_BASE=0x02100000 -DVBAR_ADDR=0x02000000
+APP_AS_INC_DIR :=
+APP_C_FLAGS := $(APP_TESTDEFINES) $(GLOBAL_TESTDEFINES) -DTTBR0_BASE=0x02100000
+APP_C_INC_DIR := $(TEST_PATH)
+APP_SRC := neon_tests.c
+
+export APP_SRC
+export APP_C_FLAGS
+export APP_C_INC_DIR
+export APP_AS_FLAGS
+export APP_AS_INC_DIR
+export APP_LD_FLAGS
+export APP_SCAT
+
+###############################################################################
+###############################################################################
+###############################################################################
+
+include $(SOCLABS_MEGASOC_TECH_DIR)/software/lib/common/Makefile.sim
+
+
+
diff --git a/software/src/neon_tests/neon_tests.c b/software/src/neon_tests/neon_tests.c
new file mode 100644
index 0000000000000000000000000000000000000000..f851bfe3a93b131283ad255d769169e72d3a840e
--- /dev/null
+++ b/software/src/neon_tests/neon_tests.c
@@ -0,0 +1,32 @@
+#include "uart_stdout.h"
+#include <stdio.h>
+#include "arm_neon.h"
+
+int main(void) {
+ uint32_t errors = 0;
+ UartStdOutInit();
+
+ printf("Neon Tests - SoCLabs MegaSoC\n");
+
+ float f1 = 2.200002;
+ float f2 = 2.200001;
+ float ans = 1.0;
+ printf("Starting SP Floating point ...\n");
+ for (int i=0; i<10;i++){
+ ans *= f1;
+ ans *= f2;
+ }
+ printf("...Finished\n");
+ printf("ans = %f\n",ans);
+
+ printf("Starting SP Vector Floating point ...\n");
+
+ float32x4_t v1 = { 1.0, 2.0, 3.0, 4.0 }, v2 = { 1.0, 1.0, 1.0, 1.0 };
+ float32x4_t sum = vaddq_f32(v1, v2);
+ printf("sum = %f\n",sum[0]);
+
+ printf("...Finished\n");
+
+ UartEndSimulation();
+}
+
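The NEON test above prints only lane 0 of the vector result and never updates its errors counter, so a wrong vector sum would still end the simulation silently. A minimal self-checking variant is sketched below; it is illustrative only, not part of this commit, and it assumes the same bare-metal printf/UART environment (uart_stdout.h) used by the other MegaSoC tests. check_neon_vadd() is a hypothetical helper name.

#include <stdio.h>
#include <stdint.h>
#include <arm_neon.h>

/* Illustrative sketch (not part of this commit): count the lanes whose
 * element-wise sum does not match the expected value. */
static uint32_t check_neon_vadd(void)
{
    const float32x4_t v1 = { 1.0f, 2.0f, 3.0f, 4.0f };
    const float32x4_t v2 = { 1.0f, 1.0f, 1.0f, 1.0f };
    const float expected[4] = { 2.0f, 3.0f, 4.0f, 5.0f };
    float result[4];
    uint32_t lane_errors = 0;

    float32x4_t sum = vaddq_f32(v1, v2);
    vst1q_f32(result, sum);            /* store all four lanes to memory */

    for (int i = 0; i < 4; i++) {
        /* exact comparison is safe here: these small sums are exact in binary */
        if (result[i] != expected[i]) {
            printf("lane %d: got %f, expected %f\n", i, result[i], expected[i]);
            lane_errors++;
        }
    }
    return lane_errors;
}

Adding errors += check_neon_vadd(); in main() would then let the test finish with the same Test Passed / Test Failed reporting style used by mem_tests.c.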