diff --git a/.gitattributes b/.gitattributes
index 556322be01b4a8..69dcb5bb2d0cde 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -6,6 +6,7 @@
*.pm text eol=lf diff=perl
*.py text eol=lf diff=python
*.bat text eol=crlf
+*.png binary
CODE_OF_CONDUCT.md -whitespace
/Documentation/**/*.adoc text eol=lf whitespace=trail,space,incomplete
/command-list.txt text eol=lf
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
new file mode 100644
index 00000000000000..b49593339932b2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -0,0 +1,105 @@
+name: Bug report
+description: Use this template to report bugs.
+body:
+ - type: checkboxes
+ id: search
+ attributes:
+ label: Existing issues matching what you're seeing
+ description: Please search for [open](https://github.com/git-for-windows/git/issues?q=is%3Aopen) or [closed](https://github.com/git-for-windows/git/issues?q=is%3Aclosed) issues matching what you're seeing before submitting a new issue.
+ options:
+ - label: I was not able to find an open or closed issue matching what I'm seeing
+ - type: textarea
+ id: git-for-windows-version
+ attributes:
+ label: Git for Windows version
+ description: Which version of Git for Windows are you using?
+ placeholder: Please insert the output of `git --version --build-options` here
+ render: shell
+ validations:
+ required: true
+ - type: dropdown
+ id: windows-version
+ attributes:
+ label: Windows version
+ description: Which version of Windows are you running?
+ options:
+ - Windows 8.1
+ - Windows 10
+ - Windows 11
+ - Other
+ default: 2
+ validations:
+ required: true
+ - type: dropdown
+ id: windows-arch
+ attributes:
+ label: Windows CPU architecture
+ description: What CPU architecture does your Windows target?
+ options:
+ - i686 (32-bit)
+ - x86_64 (64-bit)
+ - ARM64
+ default: 1
+ validations:
+ required: true
+ - type: textarea
+ id: windows-version-cmd
+ attributes:
+ label: Additional Windows version information
+ description: This provides us with further information about your Windows version, such as the build number
+ placeholder: Please insert the output of `cmd.exe /c ver` here
+ render: shell
+ - type: textarea
+ id: options
+ attributes:
+ label: Options set during installation
+ description: What options did you set as part of the installation? Or did you choose the defaults?
+ placeholder: |
+ One of the following:
+ > type "C:\Program Files\Git\etc\install-options.txt"
+ > type "C:\Program Files (x86)\Git\etc\install-options.txt"
+ > type "%USERPROFILE%\AppData\Local\Programs\Git\etc\install-options.txt"
+ > type "$env:USERPROFILE\AppData\Local\Programs\Git\etc\install-options.txt"
+ $ cat /etc/install-options.txt
+ render: shell
+ validations:
+ required: true
+ - type: textarea
+ id: other-things
+ attributes:
+ label: Other interesting things
+ description: Any other interesting things about your environment that might be related to the issue you're seeing?
+ - type: input
+ id: terminal
+ attributes:
+ label: Terminal/shell
+ description: Which terminal/shell are you running Git from? e.g. Bash/CMD/PowerShell/other
+ validations:
+ required: true
+ - type: textarea
+ id: commands
+ attributes:
+ label: Commands that trigger the issue
+ description: What commands did you run to trigger this issue? If you can provide a [Minimal, Complete, and Verifiable example](http://stackoverflow.com/help/mcve) this will help us understand the issue.
+ render: shell
+ validations:
+ required: true
+ - type: textarea
+ id: expected-behaviour
+ attributes:
+ label: Expected behaviour
+ description: What did you expect to occur after running these commands?
+ validations:
+ required: true
+ - type: textarea
+ id: actual-behaviour
+ attributes:
+ label: Actual behaviour
+ description: What actually happened instead?
+ validations:
+ required: true
+ - type: textarea
+ id: repository
+ attributes:
+ label: Repository
+ description: If the problem was occurring with a specific repository, can you provide the URL to that repository to help us with testing?
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000000000..ec4bb386bcf8a4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: false
\ No newline at end of file
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 37654cdfd7abcf..7baf31f2c471ec 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,7 +1,19 @@
-Thanks for taking the time to contribute to Git! Please be advised that the
-Git community does not use github.com for their contributions. Instead, we use
-a mailing list (git@vger.kernel.org) for code submissions, code reviews, and
-bug reports. Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
+Thanks for taking the time to contribute to Git!
+
+Those seeking to contribute to the Git for Windows fork should see
+http://gitforwindows.org/#contribute on how to contribute Windows specific
+enhancements.
+
+If your contribution is for the core Git functions and documentation
+please be aware that the Git community does not use the github.com issues
+or pull request mechanism for their contributions.
+
+Instead, we use the Git mailing list (git@vger.kernel.org) for code and
+documentation submissions, code reviews, and bug reports. The
+mailing list is plain text only (anything with HTML is sent directly
+to the spam folder).
+
+Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
to conveniently send your Pull Requests commits to our mailing list.
For a single-commit pull request, please *leave the pull request description
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000000000..22d5376407abf1
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,13 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+# especially
+# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot#enabling-dependabot-version-updates-for-actions
+
+version: 2
+updates:
+ - package-ecosystem: "github-actions" # See documentation for possible values
+ directory: "/" # Location of package manifests
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 826f2f5d3a6a88..ff2da34aaac54c 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -169,8 +169,11 @@ jobs:
NO_PERL: 1
GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"
runs-on: windows-latest
+ strategy:
+ matrix:
+ arch: [x64, arm64]
concurrency:
- group: vs-build-${{ github.ref }}
+ group: vs-build-${{ github.ref }}-${{ matrix.arch }}
cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
steps:
- uses: actions/checkout@v5
@@ -189,14 +192,14 @@ jobs:
uses: microsoft/setup-msbuild@v2
- name: copy dlls to root
shell: cmd
- run: compat\vcbuild\vcpkg_copy_dlls.bat release
+ run: compat\vcbuild\vcpkg_copy_dlls.bat release ${{ matrix.arch }}-windows
- name: generate Visual Studio solution
shell: bash
run: |
- cmake `pwd`/contrib/buildsystems/ -DCMAKE_PREFIX_PATH=`pwd`/compat/vcbuild/vcpkg/installed/x64-windows \
- -DNO_GETTEXT=YesPlease -DPERL_TESTS=OFF -DPYTHON_TESTS=OFF -DCURL_NO_CURL_CMAKE=ON
+ cmake `pwd`/contrib/buildsystems/ -DCMAKE_PREFIX_PATH=`pwd`/compat/vcbuild/vcpkg/installed/${{ matrix.arch }}-windows \
+ -DNO_GETTEXT=YesPlease -DPERL_TESTS=OFF -DPYTHON_TESTS=OFF -DCURL_NO_CURL_CMAKE=ON -DCMAKE_GENERATOR_PLATFORM=${{ matrix.arch }} -DVCPKG_ARCH=${{ matrix.arch }}-windows -DHOST_CPU=${{ matrix.arch }}
- name: MSBuild
- run: msbuild git.sln -property:Configuration=Release -property:Platform=x64 -maxCpuCount:4 -property:PlatformToolset=v142
+ run: msbuild git.sln -property:Configuration=Release -property:Platform=${{ matrix.arch }} -maxCpuCount:4 -property:PlatformToolset=v142
- name: bundle artifact tar
shell: bash
env:
@@ -210,7 +213,7 @@ jobs:
- name: upload tracked files and build artifacts
uses: actions/upload-artifact@v5
with:
- name: vs-artifacts
+ name: vs-artifacts-${{ matrix.arch }}
path: artifacts
vs-test:
name: win+VS test
@@ -228,7 +231,7 @@ jobs:
- name: download tracked files and build artifacts
uses: actions/download-artifact@v6
with:
- name: vs-artifacts
+ name: vs-artifacts-x64
path: ${{github.workspace}}
- name: extract tracked files and build artifacts
shell: bash
diff --git a/.github/workflows/monitor-components.yml b/.github/workflows/monitor-components.yml
new file mode 100644
index 00000000000000..f15ff218d28b81
--- /dev/null
+++ b/.github/workflows/monitor-components.yml
@@ -0,0 +1,94 @@
+name: Monitor component updates
+
+# Git for Windows is a slightly modified subset of MSYS2. Some of its
+# components are maintained by Git for Windows, others by MSYS2. To help
+# keeping the former up to date, this workflow monitors the Atom/RSS feeds
+# and opens new tickets for each new component version.
+
+on:
+ schedule:
+ - cron: "23 8,11,14,17 * * *"
+ workflow_dispatch:
+
+env:
+ CHARACTER_LIMIT: 5000
+ MAX_AGE: 7d
+
+jobs:
+ job:
+ # Only run this in Git for Windows' fork
+ if: github.event.repository.owner.login == 'git-for-windows'
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ strategy:
+ matrix:
+ component:
+ - label: git
+ feed: https://github.com/git/git/tags.atom
+ - label: git-lfs
+ feed: https://github.com/git-lfs/git-lfs/tags.atom
+ - label: git-credential-manager
+ feed: https://github.com/git-ecosystem/git-credential-manager/tags.atom
+ - label: tig
+ feed: https://github.com/jonas/tig/tags.atom
+ - label: cygwin
+ feed: https://github.com/cygwin/cygwin/releases.atom
+ title-pattern: ^(?!.*newlib)
+ - label: msys2-runtime-package
+ feed: https://github.com/msys2/MSYS2-packages/commits/master/msys2-runtime.atom
+ - label: msys2-runtime
+ feed: https://github.com/msys2/msys2-runtime/commits/HEAD.atom
+ aggregate: true
+ - label: openssh
+ feed: https://github.com/openssh/openssh-portable/tags.atom
+ - label: libfido2
+ feed: https://github.com/Yubico/libfido2/tags.atom
+ - label: libcbor
+ feed: https://github.com/PJK/libcbor/tags.atom
+ - label: openssl
+ feed: https://github.com/openssl/openssl/tags.atom
+ title-pattern: ^(?!.*alpha)
+ - label: gnutls
+ feed: https://gnutls.org/news.atom
+ - label: heimdal
+ feed: https://github.com/heimdal/heimdal/tags.atom
+ - label: git-sizer
+ feed: https://github.com/github/git-sizer/tags.atom
+ - label: gitflow
+ feed: https://github.com/petervanderdoes/gitflow-avh/tags.atom
+ - label: curl
+ feed: https://github.com/curl/curl/tags.atom
+ title-pattern: ^(?!rc-)
+ - label: mintty
+ feed: https://github.com/mintty/mintty/releases.atom
+ - label: 7-zip
+ feed: https://sourceforge.net/projects/sevenzip/rss?path=/7-Zip
+ aggregate: true
+ - label: bash
+ feed: https://git.savannah.gnu.org/cgit/bash.git/atom/?h=master
+ aggregate: true
+ - label: perl
+ feed: https://github.com/Perl/perl5/tags.atom
+ title-pattern: ^(?!.*(5\.[0-9]+[13579]|RC))
+ - label: pcre2
+ feed: https://github.com/PCRE2Project/pcre2/tags.atom
+ - label: mingw-w64-llvm
+ feed: https://github.com/msys2/MINGW-packages/commits/master/mingw-w64-llvm.atom
+ - label: innosetup
+ feed: https://github.com/jrsoftware/issrc/tags.atom
+ - label: mimalloc
+ feed: https://github.com/microsoft/mimalloc/tags.atom
+ title-pattern: ^(?!v1\.|v3\.[01]\.)
+ fail-fast: false
+ steps:
+ - uses: git-for-windows/rss-to-issues@v0
+ with:
+ feed: ${{matrix.component.feed}}
+ prefix: "[New ${{matrix.component.label}} version]"
+ labels: component-update
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ character-limit: ${{ env.CHARACTER_LIMIT }}
+ max-age: ${{ env.MAX_AGE }}
+ aggregate: ${{matrix.component.aggregate}}
+ title-pattern: ${{matrix.component.title-pattern}}
diff --git a/.github/workflows/nano-server.yml b/.github/workflows/nano-server.yml
new file mode 100644
index 00000000000000..85b3ed5f52ed4d
--- /dev/null
+++ b/.github/workflows/nano-server.yml
@@ -0,0 +1,76 @@
+name: Windows Nano Server tests
+
+on:
+ workflow_dispatch:
+
+env:
+ DEVELOPER: 1
+
+jobs:
+ test-nano-server:
+ runs-on: windows-2022
+ env:
+ WINDBG_DIR: "C:/Program Files (x86)/Windows Kits/10/Debuggers/x64"
+ IMAGE: mcr.microsoft.com/powershell:nanoserver-ltsc2022
+
+ steps:
+ - uses: actions/checkout@v5
+ - uses: git-for-windows/setup-git-for-windows-sdk@v1
+ - name: build Git
+ shell: bash
+ run: make -j15
+ - name: pull nanoserver image
+ shell: bash
+ run: docker pull $IMAGE
+ - name: run nano-server test
+ shell: bash
+ run: |
+ docker run \
+ --user "ContainerAdministrator" \
+ -v "$WINDBG_DIR:C:/dbg" \
+ -v "$(cygpath -aw /mingw64/bin):C:/mingw64-bin" \
+ -v "$(cygpath -aw .):C:/test" \
+ $IMAGE pwsh.exe -Command '
+ # Extend the PATH to include the `.dll` files in /mingw64/bin/
+ $env:PATH += ";C:\mingw64-bin"
+
+ # For each executable to test pick some no-operation set of
+ # flags/subcommands or something that should quickly result in an
+ # error with known exit code that is not a negative 32-bit
+ # number, and set the expected return code appropriately.
+ #
+ # Only test executables that could be expected to run in a UI
+ # less environment.
+ #
+ # ( Executable path, arguments, expected return code )
+ # also note space is required before close parenthesis (a
+ # powershell quirk when defining nested arrays like this)
+
+ $executables_to_test = @(
+ ("C:\test\git.exe", "", 1 ),
+ ("C:\test\scalar.exe", "version", 0 )
+ )
+
+ foreach ($executable in $executables_to_test)
+ {
+ Write-Output "Now testing $($executable[0])"
+ &$executable[0] $executable[1]
+ if ($LASTEXITCODE -ne $executable[2]) {
+ # if we failed, run the debugger to find out what function
+ # or DLL could not be found and then exit the script with
+ # failure The missing DLL or EXE will be referenced near
+ # the end of the output
+
+ # Set a flag to have the debugger show loader stub
+ # diagnostics. This requires running as administrator,
+ # otherwise the flag will be ignored.
+ C:\dbg\gflags -i $executable[0] +SLS
+
+ C:\dbg\cdb.exe -c "g" -c "q" $executable[0] $executable[1]
+
+ exit 1
+ }
+ }
+
+ exit 0
+ '
diff --git a/.gitignore b/.gitignore
index 24635cf2d6f4a3..64ad9d181c40bf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -171,6 +171,7 @@
/git-submodule
/git-submodule--helper
/git-subtree
+/git-survey
/git-svn
/git-switch
/git-symbolic-ref
@@ -257,5 +258,6 @@ Release/
/git.VC.db
*.dSYM
/contrib/buildsystems/out
+CMakeSettings.json
/contrib/libgit-rs/target
/contrib/libgit-sys/target
diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
new file mode 100644
index 00000000000000..7de4f99bf71ec4
--- /dev/null
+++ b/ARCHITECTURE.md
@@ -0,0 +1,116 @@
+# Architecture of Git for Windows
+
+Git for Windows is a complex project.
+
+## What _is_ Git for Windows?
+
+### A fork of `git/git`
+
+First and foremost, it is a friendly fork of [`git/git`](https://github.com/git/git), aiming to improve Git's Windows support. The [`git-for-windows/git`](https://github.com/git-for-windows/git) repository contains dozens of topics on top of `git/git`, some awaiting to be "upstreamed" (i.e. to be contributed to `git/git`), some still being stabilized, and a few topics are specific to the Git for Windows project and are not intended to be integrated into `git/git` at all.
+
+### Enhancing and maintaining Git's support for Windows
+
+On the source code side, Git's Windows support is made a bit more tricky than strictly necessary by the fact that Git does not have any platform abstraction layer (unlike other version control systems, such as Subversion). It relies on the presence of POSIX features such as the `hstrerror()` function, and on platforms lacking that functionality, Git provides shims. That leads to some challenges e.g. with the `stat()` function which is very slow on Windows because it has to collect much more metadata than what e.g. the very quick `GetFileAttributesExW()` Win32 API function provides, even when Git calls `stat()` merely to test for the presence of a file (for which all that gathered metadata is totally irrelevant).
+
+### Providing more than just source code
+
+In contrast to the Git project, Git for Windows not only publishes tagged source code versions, but full builds of Git. In fact, Git for Windows' primary purpose, as far as most users are concerned, is to provide a convenient installer that end-users can run to have Git on their computer, without ever having to check out `git-for-windows/git` let alone build it. In essence, Git for Windows has to maintain a separate project altogether in addition to the fork of `git/git`, just to build these release artifacts: [`git-for-windows/build-extra`](https://github.com/git-for-windows/build-extra). This repository also contains the definition for a couple of other release artifacts published by Git for Windows, e.g. the "portable" edition of Git for Windows which is a self-extracting 7-Zip archive that does not need to be installed.
+
+### A software distribution, really
+
+Another aspect that contributes to the complexity of Git for Windows is that it is not just building `git.exe` and distributes that. Due to its heritage within the Linux project, Git takes certain things for granted, such as the presence of a Unix shell, or for that matter, a package management system from which dependencies can be fetched and updated independently of Git itself. Things that are distinctly not present in most Windows setups. To accommodate for that, Git for Windows originally relied on the MSys project, a minimal fork of Cygwin providing a Unix shell ("Bash"), a Perl interpreter and similar Unix-like tools, and on the MINGW project, a project to build libraries and executables using a GNU C Compiler that relies only on Win32 API functions. As of Git for Windows v2.x, the project has switched away from [MSys](https://sourceforge.net/projects/mingw/files/MSYS/)/[MinGW](https://osdn.net/projects/mingw/) (due to less-than-active maintenance) to [the MSYS2 project](https://msys2.org). That switch brought along the benefit of a robust package management system based on [Pacman](https://archlinux.org/pacman/) (hailing from Arch Linux). To support Windows users, who are in general unfamiliar with Linux-like package management and the need to update installed packages frequently, Git for Windows bundles a subset of its own fork of MSYS2. To put things in perspective: Git for Windows bundles files from ~170 packages, one of which contains Git, and another one contains Git's help files. In that respect, Git for Windows acts like a distribution more than like a mere single software application.
+
+Most of MSYS2's packages that are bundled in Git for Windows are consumed directly from MSYS2. Others need forks that are maintained by Git for Windows project, to support Git for Windows better. These forks live in the [`git-for-windows/MSYS2-packages`](https://github.com/git-for-windows/MSYS2-packages) and [`git-for-windows/MINGW-packages`](https://github.com/git-for-windows/MINGW-packages) repositories. There are several reasons justifying these forks. For example, the Git for Windows' flavor of the MSYS2 runtime behaves like Git's test suite expects it while MSYS2's flavor does not. Another example: The Bash executable bundled in Git for Windows is code-signed with the same certificate as `git.exe` to help anti-malware programs get out of the users' way. That is why Git for Windows maintains its own `bash` Pacman package. And since MSYS2 dropped 32-bit support already, Git for Windows has to update the 32-bit Pacman packages itself, which is done in the git-for-windows/MSYS2-packages repository. (Side note: the 32-bit issue is a bit more complicated, actually: MSYS2 _still_ builds _MINGW_ packages targeting i686 processors, but no longer any _MSYS_ packages for said processor architecture, and Git for Windows does not keep all of the 32-bit MSYS packages up to date but instead judiciously decides which packages are vital enough as far as Git is concerned to justify the maintenance cost.)
+
+### Supporting third-party applications that use Git's functionality
+
+Since the infrastructure required by Git is non-trivial the installer (or for that matter, the Portable Git) is not exactly light-weight: As of January 2023, both artifacts are over fifty megabytes. This is a problem for third-party applications wishing to bundle a version of Git for Windows, which is often advisable given that applications may depend on features that have been introduced only in recent Git versions and therefore relying on an installed Git for Windows could break things. To help with that, the Git for Windows project also provides MinGit as a release artifact, a zip file that is much smaller than the full installer and that contains only the parts of Git for Windows relevant for third-party applications. It lacks Git GUI, for example, as well as the terminal program MinTTY, or for that matter, the documentation.
+
+### Supporting `git/git`'s GitHub workflows
+
+The Git for Windows project is also responsible for keeping the Windows part of `git/git`'s automated builds up and running. On Windows, there is no canonical and easy way to get a build environment necessary to build Git and run its test suite, therefore this is a non-trivial task that comes with its own maintenance cost. Git for Windows provides two GitHub Actions to help with that: [`git-for-windows/setup-git-for-windows-sdk`](https://github.com/git-for-windows/setup-git-for-windows-sdk) to set up a tiny subset of Git for Windows' full SDK (which would require about 500MB to be cloned, as opposed to the ~75MB of that subset) and [`git-for-windows/get-azure-pipelines-artifact`](https://github.com/git-for-windows/get-azure-pipelines-artifact) e.g. to download some regularly pre-built artifacts (for example, when `git/git`'s automated tests ran on an Ubuntu version that did not provide an up to date [Coccinelle](https://coccinelle.gitlabpages.inria.fr/website/) package, this GitHub Action was used to download a pre-built version of that Debian package).
+
+## Maintaining Git for Windows' components
+
+Git for Windows uses a combination of [a GitHub App called GitForWindowsHelper](https://github.com/git-for-windows/gfw-helper-github-app) (to listen for so-called [slash commands](https://github.com/git-for-windows/gfw-helper-github-app#slash-commands)) combined with workflows in [the `git-for-windows-automation` repository](https://github.com/git-for-windows/git-for-windows-automation/) (for computationally heavy tasks) to support Git for Windows' repetitive tasks.
+
+This heavy automation serves two purposes:
+
+1. Document the knowledge about "how things are done" in the Git for Windows project.
+2. Make Git for Windows' maintenance less tedious by off-loading as many tasks onto machines as possible.
+
+One neat trick of some `git-for-windows-automation` workflows is that they "mirror back" check runs to the targeted PRs in another repository. This essentially allows versioning the source code independently of the workflow definition.
+
+Here is a diagram showing how the bits and pieces fit together.
+
+```mermaid
+graph LR
+ A[`monitor-components`] --> |opens| B
+ B{issues labeled
`component-update`} --> |/open pr| C
+ C((GitForWindowsHelper)) --> |triggers| D
+ D[`open-pr`] --> |opens| E
+ E{PR in MINGW-packages
MSYS2-packages
build-extra} --> |closes| B
+ E --> |/deploy| F
+ F((GitForWindowsHelper)) --> |triggers| G
+ G[`build-and-deploy`] --> |deploys to| H
+ H{Pacman repository}
+ C --> |backed by| I
+ F --> |backed by| I
+ I[[Azure Function]]
+ D --> |running in| J
+ G --> | running in| J
+ J[[git-for-windows-automation]]
+ K[[git-sdk-32
git-sdk-64
git-sdk-arm64]] --> |syncing from| H
+ B --> |/add release note| L
+ L[`add-release-note`]
+```
+
+For the curious mind, here are [detailed instructions how the Azure Function backing the GitForWindowsHelper GitHub App was set up](https://github.com/git-for-windows/gfw-helper-github-app#how-this-github-app-was-set-up).
+
+### The `monitor-components` workflow
+
+When new versions of components that Git for Windows builds become available, new Pacman packages have to be built. To this end, [the `monitor-components` workflow](https://github.com/git-for-windows/git/blob/main/.github/workflows/monitor-components.yml) monitors a couple of RSS feeds and opens new tickets labeled `component-update` for such new versions.
+
+### Opening Pull Requests to update Git for Windows' components
+
+After determining that such a ticket indeed indicates the need for a new Pacman package build, a Git for Windows maintainer issues the `/open pr` command via an issue comment ([example](https://github.com/git-for-windows/git/issues/4281#issuecomment-1426859787)), which gets picked up by the GitForWindowsHelper GitHub App, which in turn triggers [the `open-pr` workflow](https://github.com/git-for-windows/git-for-windows-automation/blob/main/.github/workflows/open-pr.yml) in the `git-for-windows-automation` repository.
+
+### Deploying the Pacman packages
+
+This will open a Pull Request in one of Git for Windows' repositories, and once the PR build passes, a Git for Windows maintainer issues the `/deploy` command ([example](https://github.com/git-for-windows/MINGW-packages/pull/69#issuecomment-1427591890)), which gets picked up by the GitForWindowsHelper GitHub App, which triggers [the `build-and-deploy` workflow](https://github.com/git-for-windows/git-for-windows-automation/blob/main/.github/workflows/build-and-deploy.yml).
+
+### Adding release notes
+
+Finally, once the packages have been built and deployed to the Pacman repository (which is hosted in Azure Blob Storage), a Git for Windows maintainer will merge the PR(s), which in turn will close the ticket, and the maintainer then issues an `/add release note` command ([example](https://github.com/git-for-windows/MINGW-packages/pull/69#issuecomment-1427782230)), which again gets picked up by the GitForWindowsHelper GitHub App that triggers [the `add-release-note` workflow](https://github.com/git-for-windows/build-extra/blob/main/.github/workflows/add-release-note.yml) that creates and pushes a new commit to the `ReleaseNotes.md` file in `build-extra` ([example](https://github.com/git-for-windows/build-extra/commit/b39c148ff8dc0e987afdb677d17c46a8e99fd0ef)).
+
+## Releasing official Git for Windows versions
+
+A relatively infrequent part of Git for Windows' maintainers' duties, if the most rewarding part, is the task of releasing new versions of Git for Windows.
+
+Most commonly, this is done in response to the "upstream" Git project releasing a new version. When that happens, a Git for Windows maintainer runs [the helper script](https://github.com/git-for-windows/build-extra/blob/main/shears.sh) to perform a "merging rebase" (i.e. a rebase that starts with a fake-merge of the previous tip commit, to maintain both a clean set of commits as well as a [fast-forwarding](https://git-scm.com/docs/git-merge#Documentation/git-merge.txt---ff-only) commit history).
+
+Once that is done, the maintainer will open a Pull Request to benefit from the automated builds and tests ([example](https://github.com/git-for-windows/git/pull/4160)) as well as from reviews of the [`range-diff`](https://git-scm.com/docs/git-range-diff) relative to the current `main` branch.
+
+Once everything looks good, the maintainer will issue the `/git-artifacts` command ([example](https://github.com/git-for-windows/git/pull/4160#issuecomment-1346801735)). This will trigger an automated workflow that builds all of the release artifacts: installers, Portable Git, MinGit, `.tar.xz` archive and a NuGet package. Apart from the NuGet package, two sets of artifacts are built: targeting 32-bit ("x86") and 64-bit ("amd64").
+
+Once these artifacts are built, the maintainer will download the installer and run [the "pre-flight checklist"](https://github.com/git-for-windows/build-extra/blob/main/installer/checklist.txt).
+
+If everything looks good, a `/release` command will be issued, which triggers yet another workflow that will download the just-built-and-verified release artifacts, publish them as a new GitHub release, publish the NuGet packages, deploy the Pacman packages to the Pacman repository, send out an announcement mail, and update the respective repositories including [Git for Windows' website](https://gitforwindows.org/).
+
+As mentioned [before](#architecture-of-git-for-windows), the `/git-artifacts` and `/release` commands are picked up by the GitForWindowsHelper GitHub App which subsequently triggers the respective workflows in the `git-for-windows-automation` repository. Here is a diagram:
+
+```mermaid
+graph LR
+ A{Pull Request
updating to
new Git version} --> |/git-artifacts| B
+ B((GitForWindowsHelper)) --> |triggers| C
+ C[`tag-git`] --> |upon successful build
triggers| D
+ D((GitForWindowsHelper)) --> |triggers| E
+ E[`git-artifacts`]
+ E --> |maintainer verifies artifacts| E
+ A --> |upon verified `git-artifacts`
/release| F
+ F[`release-git`]
+ C --> |running in| J
+ E --> | running in| J
+ F --> | running in| J
+ J[[git-for-windows-automation]]
+```
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index e58917c50a96dc..4daef7e3ce9196 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,9 +1,9 @@
-# Git Code of Conduct
+# Git for Windows Code of Conduct
This code of conduct outlines our expectations for participants within
-the Git community, as well as steps for reporting unacceptable behavior.
-We are committed to providing a welcoming and inspiring community for
-all and expect our code of conduct to be honored. Anyone who violates
+the **Git for Windows** community, as well as steps for reporting unacceptable
+behavior. We are committed to providing a welcoming and inspiring community
+for all and expect our code of conduct to be honored. Anyone who violates
this code of conduct may be banned from the community.
## Our Pledge
@@ -12,8 +12,8 @@ We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
-nationality, personal appearance, race, religion, or sexual identity
-and orientation.
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
@@ -28,17 +28,17 @@ community include:
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
-* Focusing on what is best not just for us as individuals, but for the
- overall community
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
Examples of unacceptable behavior include:
-* The use of sexualized language or imagery, and sexual attention or
- advances of any kind
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
-* Publishing others' private information, such as a physical or email
- address, without their explicit permission
+* Publishing others' private information, such as a physical or email address,
+ without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
@@ -58,20 +58,14 @@ decisions when appropriate.
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
-Examples of representing our community include using an official e-mail address,
+Examples of representing our community include using an official email address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported to the community leaders responsible for enforcement at
-git@sfconservancy.org, or individually:
-
- - Ævar Arnfjörð Bjarmason
- - Christian Couder
- - Junio C Hamano
- - Taylor Blau
+reported by contacting the Git for Windows maintainer.
All complaints will be reviewed and investigated promptly and fairly.
@@ -94,15 +88,15 @@ behavior was inappropriate. A public apology may be requested.
### 2. Warning
-**Community Impact**: A violation through a single incident or series
-of actions.
+**Community Impact**: A violation through a single incident or series of
+actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
-like social media. Violating these terms may lead to a temporary or
-permanent ban.
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
### 3. Temporary Ban
@@ -118,27 +112,27 @@ Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
-standards, including sustained inappropriate behavior, harassment of an
+standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
-**Consequence**: A permanent ban from any sort of public interaction within
-the community.
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 2.0, available at
-[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
-[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
-at [https://www.contributor-covenant.org/translations][translations].
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
-[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000000000..48ff9029374df3
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,417 @@
+How to Contribute to Git for Windows
+====================================
+
+Git was originally designed for Unix systems and still today, all the build tools for the Git
+codebase assume you have standard Unix tools available in your path. If you have an open-source
+mindset and want to start contributing to Git, but primarily use a Windows machine, then you may
+have trouble getting started. This guide is for you.
+
+Get the Source
+--------------
+
+Clone the [GitForWindows repository on GitHub](https://github.com/git-for-windows/git).
+It is helpful to create your own fork for storing your development branches.
+
+Windows uses different line endings than Unix systems. See
+[this GitHub article on working with line endings](https://help.github.com/articles/dealing-with-line-endings/#refreshing-a-repository-after-changing-line-endings)
+if you have trouble with line endings.
+
+Build the Source
+----------------
+
+First, download and install the latest [Git for Windows SDK (64-bit)](https://github.com/git-for-windows/build-extra/releases/latest).
+When complete, you can run the Git SDK, which creates a new Git Bash terminal window with
+the additional development commands, such as `make`.
+
+ As of the time of writing, the SDK uses a different credential manager, so you may still want to use normal Git
+ Bash for interacting with your remotes. Alternatively, use SSH rather than HTTPS and
+ avoid credential manager problems.
+
+You should now be ready to type `make` from the root of your `git` source directory.
+Here are some helpful variations:
+
+* `make -j[N] DEVELOPER=1`: Compile new sources using up to N concurrent processes.
+ The `DEVELOPER` flag turns on all warnings; code failing these warnings will not be
+ accepted upstream ("upstream" = "the core Git project").
+* `make clean`: Delete all compiled files.
+
+When running `make`, you can use `-j$(nproc)` to automatically use the number of processors
+on your machine as the number of concurrent build processes.
+
+You can go deeper on the Windows-specific build process by reading the
+[technical overview](https://gitforwindows.org/technical-overview) or the
+[guide to compiling Git with Visual Studio](https://gitforwindows.org/compiling-git-with-visual-studio).
+
+## Building `git` on Windows with Visual Studio
+
+The typical approach to building `git` is to use the standard `Makefile` with GCC, as
+above. Developers working in a Windows environment may want to instead build with the
+[Microsoft Visual C++ compiler and libraries toolset (MSVC)](https://blogs.msdn.microsoft.com/vcblog/2017/03/07/msvc-the-best-choice-for-windows/).
+There are a few benefits to using MSVC over GCC during your development, including creating
+symbols for debugging and [performance tracing](https://github.com/Microsoft/perfview#perfview-overview).
+
+There are two ways to build Git for Windows using MSVC. Each have their own merits.
+
+### Using SDK Command Line
+
+Use one of the following commands from the SDK Bash window to build Git for Windows:
+
+```
+ make MSVC=1 -j12
+ make MSVC=1 DEBUG=1 -j12
+```
+
+The first form produces release-mode binaries; the second produces debug-mode binaries.
+Both forms produce PDB files and can be debugged. However, the first is best for perf
+tracing and the second is best for single-stepping.
+
+You can then open Visual Studio and select File -> Open -> Project/Solution and select
+the compiled `git.exe` file. This creates a basic solution and you can use the debugging
+and performance tracing tools in Visual Studio to monitor a Git process. Use the Debug
+Properties page to set the working directory and command line arguments.
+
+Be sure to clean up before switching back to GCC (or to switch between debug and
+release MSVC builds):
+
+```
+ make MSVC=1 -j12 clean
+ make MSVC=1 DEBUG=1 -j12 clean
+```
+
+### Using the IDE
+
+If you prefer working in Visual Studio with a solution full of projects, then you can use
+CMake, either by letting Visual Studio configure it automatically (simply open Git's
+top-level directory via `File>Open>Folder...`) or by (downloading and) running
+[CMake](https://cmake.org) manually.
+
+What to Change?
+---------------
+
+Many new contributors ask: What should I start working on?
+
+One way to win big with the open-source community is to look at the
+[issues page](https://github.com/git-for-windows/git/issues) and see if there are any issues that
+you can fix quickly, or if anything catches your eye.
+
+You can also look at [the unofficial Chromium issues page](https://crbug.com/git) for
+multi-platform issues. You can look at recent user questions on
+[the Git mailing list](https://public-inbox.org/git).
+
+Or you can "scratch your own itch", i.e. address an issue you have with Git. The team at Microsoft where the Git for Windows maintainer works, for example, is focused almost entirely on [improving performance](https://blogs.msdn.microsoft.com/devops/2018/01/11/microsofts-performance-contributions-to-git-in-2017/).
+We approach our work by finding something that is slow and try to speed it up. We start our
+investigation by reliably reproducing the slow behavior, then running that example using
+the MSVC build and tracing the results in PerfView.
+
+You could also think of something you wish Git could do, and make it do that thing! The
+only concern I would have with this approach is whether or not that feature is something
+the community also wants. If this excites you though, go for it! Don't be afraid to
+[get involved in the mailing list](http://vger.kernel.org/vger-lists.html#git) early for
+feedback on the idea.
+
+Test Your Changes
+-----------------
+
+After you make your changes, it is important that you test your changes. Manual testing is
+important, but checking and extending the existing test suite is even more important. You
+want to run the functional tests to see if you broke something else during your change, and
+you want to extend the functional tests to be sure no one breaks your feature in the future.
+
+### Functional Tests
+
+Navigate to the `t/` directory and type `make` to run all tests or use `prove` as
+[described on this Git for Windows page](https://gitforwindows.org/building-git):
+
+```
+prove -j12 --state=failed,save ./t[0-9]*.sh
+```
+
+You can also run each test directly by running the corresponding shell script with a name
+like `tNNNN-descriptor.sh`.
+
+If you are adding new functionality, you may need to create unit tests by creating
+helper commands that test a very limited action. These commands are stored in `t/helper`.
+When adding a helper, be sure to add a line to `t/Makefile` and to the `.gitignore` for the
+binary file you add. The Git community prefers functional tests using the full `git`
+executable, so try to exercise your new code using `git` commands before creating a test
+helper.
+
+To find out why a test failed, repeat the test with the `-x -v -d -i` options and then
+navigate to the appropriate "trash" directory to see the data shape that was used for the
+failed test step.
+
+Read [`t/README`](t/README) for more details.
+
+### Performance Tests
+
+If you are working on improving performance, you will need to be acquainted with the
+performance tests in `t/perf`. There are not too many performance tests yet, but adding one
+as your first commit in a patch series helps to communicate the boost your change provides.
+
+To check the change in performance across multiple versions of `git`, you can use the
+`t/perf/run` script. For example, to compare the performance of `git rev-list` across the
+`core/master` and `core/next` branches compared to a `topic` branch, you can run
+
+```
+cd t/perf
+./run core/master core/next topic -- p0001-rev-list.sh
+```
+
+You can also set certain environment variables to help test the performance on different
+repositories or with more repetitions. The full list is available in
+[the `t/perf/README` file](t/perf/README),
+but here are a few important ones:
+
+```
+GIT_PERF_REPO=/path/to/repo
+GIT_PERF_LARGE_REPO=/path/to/large/repo
+GIT_PERF_REPEAT_COUNT=10
+```
+
+When running the performance tests on Linux, you may see a message "Can't locate JSON.pm in
+@INC" and that means you need to run `sudo cpanm install JSON` to get the JSON perl package.
+
+For running performance tests, it can be helpful to set up a few repositories with strange
+data shapes, such as:
+
+**Many objects:** Clone repos such as [Kotlin](https://github.com/jetbrains/kotlin), [Linux](https://github.com/torvalds/linux), or [Android](https://source.android.com/setup/downloading).
+
+**Many pack-files:** You can split a fresh clone into multiple pack-files of size at most
+16MB by running `git repack -adfF --max-pack-size=16m`. See the
+[`git repack` documentation](https://git-scm.com/docs/git-repack) for more information.
+You can count the number of pack-files using `ls .git/objects/pack/*.pack | wc -l`.
+
+**Many loose objects:** If you already split your repository into multiple pack-files, then
+you can pick one to split into loose objects using `cat .git/objects/pack/[id].pack | git unpack-objects`;
+delete the `[id].pack` and `[id].idx` files after this. You can count the number of loose
+objects using `ls .git/objects/??/* | wc -l`.
+
+**Deep history:** Usually large repositories also have deep histories, but you can use the
+[test-many-commits-1m repo](https://github.com/cirosantilli/test-many-commits-1m/) to
+target deep histories without the overhead of many objects. One issue with this repository:
+there are no merge commits, so you will need to use a different repository to test a "wide"
+commit history.
+
+**Large Index:** You can generate a large index and repo by using the scripts in
+`t/perf/repos`. There are two scripts. `many-files.sh` will generate a repo with the
+same tree and blobs but different paths. Using `many-files.sh -d 5 -w 10 -f 9` will create
+a repo with ~1 million entries in the index. `inflate-repo.sh` will use an existing repo
+and copy the current work tree until it is a specified size.
+
+Test Your Changes on Linux
+--------------------------
+
+It can be important to work directly on the [core Git codebase](https://github.com/git/git),
+such as a recent commit into the `master` or `next` branch that has not been incorporated
+into Git for Windows. Also, it can help to run functional and performance tests on your
+code in Linux before submitting patches to the mailing list, which focuses on many platforms.
+The differences between Windows and Linux are usually enough to catch most cross-platform
+issues.
+
+### Using the Windows Subsystem for Linux
+
+The [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
+allows you to [install Ubuntu Linux as an app](https://www.microsoft.com/en-us/store/p/ubuntu/9nblggh4msv6)
+that can run Linux executables on top of the Windows kernel. Internally,
+Linux syscalls are interpreted by the WSL, everything else is plain Ubuntu.
+
+First, open WSL (either type "Bash" in Cortana, or execute "bash.exe" in a CMD window).
+Then install the prerequisites, and `git` for the initial clone:
+
+```
+sudo apt-get update
+sudo apt-get install git gcc make libssl-dev libcurl4-openssl-dev \
+ libexpat-dev tcl tk gettext git-email zlib1g-dev
+```
+
+Then, clone and build:
+
+```
+git clone https://github.com/git-for-windows/git
+cd git
+git remote add -f upstream https://github.com/git/git
+make
+```
+
+Be sure to clone into `/home/[user]/` and not into any folder under `/mnt/?/` or your build
+will fail due to colons in file names.
+
+### Using a Linux Virtual Machine with Hyper-V
+
+If you prefer, you can use a virtual machine (VM) to run Linux and test your changes in the
+full environment. The test suite runs a lot faster on Linux than on Windows or with the WSL.
+You can connect to the VM using an SSH terminal like
+[PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/).
+
+The following instructions are for using Hyper-V, which is available in some versions of Windows.
+There are many virtual machine alternatives available, if you do not have such a version installed.
+
+* [Download an Ubuntu Server ISO](https://www.ubuntu.com/download/server).
+* Open [Hyper-V Manager](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v).
+* [Set up a virtual switch](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/connect-to-network)
+ so your VM can reach the network.
+* Select "Quick Create", name your machine, select the ISO as installation source, and un-check
+ "This virtual machine will run Windows."
+* Go through the Ubuntu install process, being sure to select to install OpenSSH Server.
+* When install is complete, log in and check the SSH server status with `sudo service ssh status`.
+ * If the service is not found, install with `sudo apt-get install openssh-server`.
+ * If the service is not running, then use `sudo service ssh start`.
+* Use `shutdown -h now` to shutdown the VM, go to the Hyper-V settings for the VM, expand Network Adapter
+ to select "Advanced Features", and set the MAC address to be static (this can save your VM from losing
+ network if shut down incorrectly).
+* Provide as many cores to your VM as you can (for parallel builds).
+* Restart your VM, but do not connect.
+* Use `ssh` in Git Bash, download [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/), or use your favorite SSH client to connect to the VM through SSH.
+
+In order to build and use `git`, you will need the following libraries via `apt-get`:
+
+```
+sudo apt-get update
+sudo apt-get install git gcc make libssl-dev libcurl4-openssl-dev \
+ libexpat-dev tcl tk gettext git-email zlib1g-dev
+```
+
+To get your code from your Windows machine to the Linux VM, it is easiest to push the branch to your fork of Git and clone your fork in the Linux VM.
+
+Don't forget to set your `git` config with your preferred name, email, and editor.
+
+Polish Your Commits
+-------------------
+
+Before submitting your patch, be sure to read the [coding guidelines](https://github.com/git/git/blob/master/Documentation/CodingGuidelines)
+and check your code to match as best you can. This can be a lot of effort, but it saves
+time during review to avoid style issues.
+
+The other possibly major difference between the mailing list submissions and GitHub PR workflows
+is that each commit will be reviewed independently. Even if you are submitting a
+patch series with multiple commits, each commit must stand on its own and be reviewable
+by itself. Make sure the commit message clearly explains the why of the commit, not the how.
+Describe what is wrong with the current code and how your changes have made the code better.
+
+When preparing your patch, it is important to put yourself in the shoes of the Git community.
+Accepting a patch requires more justification than approving a pull request from someone on
+your team. The community has a stable product and is responsible for keeping it stable. If
+you introduce a bug, then they cannot count on you being around to fix it. When you decided
+to start work on a new feature, they were not part of the design discussion and may not
+even believe the feature is worth introducing.
+
+Questions to answer in your patch message (and commit messages) may include:
+* Why is this patch necessary?
+* How does the current behavior cause pain for users?
+* What kinds of repositories are necessary for noticing a difference?
+* What design options did you consider before writing this version? Do you have links to
+ code for those alternate designs?
+* Is this a performance fix? Provide clear performance numbers for various well-known repos.
+
+Here are some other tips that we use when cleaning up our commits:
+
+* Commit messages should be wrapped at 76 columns per line (or less; 72 is also a
+ common choice).
+* Make sure the commits are signed off using `git commit (-s|--signoff)`. See
+ [SubmittingPatches](https://github.com/git/git/blob/v2.8.1/Documentation/SubmittingPatches#L234-L286)
+ for more details about what this sign-off means.
+* Check for whitespace errors using `git diff --check [base]...HEAD` or `git log --check`.
+* Run `git rebase --whitespace=fix` to correct upstream issues with whitespace.
+* Become familiar with interactive rebase (`git rebase -i`) because you will be reordering,
+ squashing, and editing commits as your patch or series of patches is reviewed.
+* Make sure any shell scripts that you add have the executable bit set on them. This is
+ usually for test files that you add in the `/t` directory. You can use
+ `git add --chmod=+x [file]` to update it. You can test whether a file is marked as executable
+ using `git ls-files --stage \*.sh`; the first number is 100755 for executable files.
+* Your commit titles should match the "area: change description" format. Rules of thumb:
+ * Choose ": " prefix appropriately.
+ * Keep the description short and to the point.
+ * The word that follows the ": " prefix is not capitalized.
+ * Do not include a full-stop at the end of the title.
+ * Read a few commit messages -- using `git log origin/master`, for instance -- to
+ become acquainted with the preferred commit message style.
+* Build source using `make DEVELOPER=1` for extra-strict compiler warnings.
+
+Submit Your Patch
+-----------------
+
+Git for Windows [accepts pull requests on GitHub](https://github.com/git-for-windows/git/pulls), but
+these are reserved for Windows-specific improvements. For core Git, submissions are accepted on
+[the Git mailing list](https://public-inbox.org/git).
+
+### Configure Git to Send Emails
+
+There are a bunch of options for configuring the `git send-email` command. These options can
+be found in the documentation for
+[`git config`](https://git-scm.com/docs/git-config) and
+[`git send-email`](https://git-scm.com/docs/git-send-email).
+
+```
+git config --global sendemail.smtpserver
+git config --global sendemail.smtpserverport 587
+git config --global sendemail.smtpencryption tls
+git config --global sendemail.smtpuser
+```
+
+To avoid storing your password in the config file, store it in the Git credential manager:
+
+```
+$ git credential fill
+protocol=smtp
+host=
+username=
+password=password
+```
+
+Before submitting a patch, read the [Git documentation on submitting patches](https://github.com/git/git/blob/master/Documentation/SubmittingPatches).
+
+To construct a patch set, use the `git format-patch` command. There are three important options:
+
+* `--cover-letter`: If specified, create a `[v#-]0000-cover-letter.patch` file that can be
+ edited to describe the patch as a whole. If you previously added a branch description using
+ `git branch --edit-description`, you will end up with a 0/N mail with that description and
+ a nice overall diffstat.
+* `--in-reply-to=[Message-ID]`: This will mark your cover letter as replying to the given
+ message (which should correspond to your previous iteration). To determine the correct Message-ID,
+ find the message you are replying to on [public-inbox.org/git](https://public-inbox.org/git) and take
+ the ID from between the angle brackets.
+
+* `--subject-prefix=[prefix]`: This defaults to [PATCH]. For subsequent iterations, you will want to
+ override it like `--subject-prefix="[PATCH v2]"`. You can also use the `-v` option to have it
+ automatically generate the version number in the patches.
+
+If you have multiple commits and use the `--cover-letter` option be sure to open the
+`0000-cover-letter.patch` file to update the subject and add some details about the overall purpose
+of the patch series.
+
+### Examples
+
+To generate a single commit patch file:
+```
+git format-patch -s -o [dir] -1
+```
+To generate four patch files from the last four commits with a cover letter:
+```
+git format-patch --cover-letter -s -o [dir] HEAD~4
+```
+To generate version 3 with four patch files from the last four commits with a cover letter:
+```
+git format-patch --cover-letter -s -o [dir] -v 3 HEAD~4
+```
+
+### Submit the Patch
+
+Run [`git send-email`](https://git-scm.com/docs/git-send-email), starting with a test email:
+
+```
+git send-email --to=yourself@address.com [dir with patches]/*.patch
+```
+
+After checking the receipt of your test email, you can send to the list and to any
+potentially interested reviewers.
+
+```
+git send-email --to=git@vger.kernel.org --cc= --cc= [dir with patches]/*.patch
+```
+
+To submit an nth version patch (say version 3):
+
+```
+git send-email --to=git@vger.kernel.org --cc= --cc= \
+ --in-reply-to= [dir with patches]/*.patch
+```
diff --git a/Documentation/config.adoc b/Documentation/config.adoc
index 62eebe7c54501c..bd7187c7b48e4b 100644
--- a/Documentation/config.adoc
+++ b/Documentation/config.adoc
@@ -519,10 +519,14 @@ include::config/safe.adoc[]
include::config/sendemail.adoc[]
+include::config/sendpack.adoc[]
+
include::config/sequencer.adoc[]
include::config/showbranch.adoc[]
+include::config/sideband.adoc[]
+
include::config/sparse.adoc[]
include::config/splitindex.adoc[]
@@ -535,6 +539,8 @@ include::config/status.adoc[]
include::config/submodule.adoc[]
+include::config/survey.adoc[]
+
include::config/tag.adoc[]
include::config/tar.adoc[]
@@ -557,4 +563,6 @@ include::config/versionsort.adoc[]
include::config/web.adoc[]
+include::config/windows.adoc[]
+
include::config/worktree.adoc[]
diff --git a/Documentation/config/advice.adoc b/Documentation/config/advice.adoc
index 257db58918179a..28fb0e4a18f9f3 100644
--- a/Documentation/config/advice.adoc
+++ b/Documentation/config/advice.adoc
@@ -64,6 +64,9 @@ all advice messages.
set their identity configuration.
mergeConflict::
Shown when various commands stop because of conflicts.
+ nameTooLong::
+ Advice shown if a filepath operation is attempted where the
+ path was too long.
nestedTag::
Shown when a user attempts to recursively tag a tag object.
pushAlreadyExists::
@@ -166,4 +169,8 @@ all advice messages.
Shown when the user tries to create a worktree from an
invalid reference, to tell the user how to create a new unborn
branch instead.
+
+ useCoreFSMonitorConfig::
+ Advice shown if the deprecated 'core.useBuiltinFSMonitor' config
+ setting is in use.
--
diff --git a/Documentation/config/core.adoc b/Documentation/config/core.adoc
index a0ebf03e2eb050..cac7438e7de505 100644
--- a/Documentation/config/core.adoc
+++ b/Documentation/config/core.adoc
@@ -721,6 +721,19 @@ relatively high IO latencies. When enabled, Git will do the
index comparison to the filesystem data in parallel, allowing
overlapping IO's. Defaults to true.
+core.fscache::
+ Enable additional caching of file system data for some operations.
++
+Git for Windows uses this to bulk-read and cache lstat data of entire
+directories (instead of doing lstat file by file).
+
+core.longpaths::
+ Enable long path (> 260 characters) support for builtin commands in Git for
+ Windows. This is disabled by default, as long paths are not supported
+ by Windows Explorer, cmd.exe and the Git for Windows tool chain
+ (msys, bash, tcl, perl...). Only enable this if you know what you're
+ doing and are prepared to live with a few quirks.
+
core.unsetenvvars::
Windows-only: comma-separated list of environment variables'
names that need to be unset before spawning any other process.
@@ -788,3 +801,9 @@ core.maxTreeDepth::
to allow Git to abort cleanly, and should not generally need to
be adjusted. When Git is compiled with MSVC, the default is 512.
Otherwise, the default is 2048.
+
+core.WSLCompat::
+ Tells Git whether to enable WSL compatibility mode.
+ The default value is false. When set to true, Git will set the mode
+ bits of files in the way WSL does, so that the executable flag of
+ files can be set or read correctly.
diff --git a/Documentation/config/http.adoc b/Documentation/config/http.adoc
index 849c89f36c5ad8..741ff5dd6eeab3 100644
--- a/Documentation/config/http.adoc
+++ b/Documentation/config/http.adoc
@@ -231,13 +231,20 @@ http.sslKeyType::
See also libcurl `CURLOPT_SSLKEYTYPE`. Can be overridden by the
`GIT_SSL_KEY_TYPE` environment variable.
+http.allowNTLMAuth::
+ Whether or not to allow NTLM authentication. While very convenient to set
+ up, and therefore still used in many on-prem scenarios, NTLM is a weak
+ authentication method and therefore deprecated. Defaults to "false".
+
http.schannelCheckRevoke::
Used to enforce or disable certificate revocation checks in cURL
- when http.sslBackend is set to "schannel". Defaults to `true` if
- unset. Only necessary to disable this if Git consistently errors
- and the message is about checking the revocation status of a
- certificate. This option is ignored if cURL lacks support for
- setting the relevant SSL option at runtime.
+ when http.sslBackend is set to "schannel" via "true" and "false",
+ respectively. Another accepted value is "best-effort" (the default)
+ in which case revocation checks are performed, but errors due to
+ revocation list distribution points that are offline are silently
+ ignored, as well as errors due to certificates missing revocation
+ list distribution points. This option is ignored if cURL lacks
+ support for setting the relevant SSL option at runtime.
http.schannelUseSSLCAInfo::
As of cURL v7.60.0, the Secure Channel backend can use the
@@ -247,6 +254,11 @@ http.schannelUseSSLCAInfo::
when the `schannel` backend was configured via `http.sslBackend`,
unless `http.schannelUseSSLCAInfo` overrides this behavior.
+http.sslAutoClientCert::
+ As of cURL v7.77.0, the Secure Channel backend won't automatically
+ send client certificates from the Windows Certificate Store anymore.
+ To opt in to the old behavior, http.sslAutoClientCert can be set.
+
http.pinnedPubkey::
Public key of the https service. It may either be the filename of
a PEM or DER encoded public key file or a string starting with
diff --git a/Documentation/config/sendpack.adoc b/Documentation/config/sendpack.adoc
new file mode 100644
index 00000000000000..e306f657fba7dd
--- /dev/null
+++ b/Documentation/config/sendpack.adoc
@@ -0,0 +1,5 @@
+sendpack.sideband::
+ Allows the side-band-64k capability for send-pack to be disabled
+ even when it is advertised by the server. Makes it possible to work
+ around a limitation in the Git for Windows implementation together
+ with the dumb git protocol. Defaults to true.
diff --git a/Documentation/config/sideband.adoc b/Documentation/config/sideband.adoc
new file mode 100644
index 00000000000000..f347fd6b33004a
--- /dev/null
+++ b/Documentation/config/sideband.adoc
@@ -0,0 +1,16 @@
+sideband.allowControlCharacters::
+ By default, control characters that are delivered via the sideband
+ are masked, except ANSI color sequences. This prevents potentially
+ unwanted ANSI escape sequences from being sent to the terminal. Use
+ this config setting to override this behavior:
++
+--
+ color::
+ Allow ANSI color sequences, line feeds and horizontal tabs,
+ but mask all other control characters. This is the default.
+ false::
+ Mask all control characters other than line feeds and
+ horizontal tabs.
+ true::
+ Allow all control characters to be sent to the terminal.
+--
diff --git a/Documentation/config/survey.adoc b/Documentation/config/survey.adoc
new file mode 100644
index 00000000000000..9e594a2092f225
--- /dev/null
+++ b/Documentation/config/survey.adoc
@@ -0,0 +1,14 @@
+survey.*::
+ These variables adjust the default behavior of the `git survey`
+ command. The intention is that this command could be run in the
+ background with these options.
++
+--
+ verbose::
+ This boolean value implies the `--[no-]verbose` option.
+ progress::
+ This boolean value implies the `--[no-]progress` option.
+ top::
+ This integer value implies `--top=`, specifying the
+ number of entries in the detail tables.
+--
diff --git a/Documentation/config/windows.adoc b/Documentation/config/windows.adoc
new file mode 100644
index 00000000000000..fdaaf1c65504f3
--- /dev/null
+++ b/Documentation/config/windows.adoc
@@ -0,0 +1,4 @@
+windows.appendAtomically::
+ By default, the atomic append API is used on Windows. However, it works
+ only with local disk files; if you are working on a network file system,
+ you should set this to false to turn it off.
diff --git a/Documentation/git-reset.adoc b/Documentation/git-reset.adoc
index 5023b5069972ca..933e2fac7dd662 100644
--- a/Documentation/git-reset.adoc
+++ b/Documentation/git-reset.adoc
@@ -12,6 +12,7 @@ git reset [--soft | --mixed [-N] | --hard | --merge | --keep] [-q] [<commit>]
git reset [-q] [<tree-ish>] [--] <pathspec>...
git reset [-q] [--pathspec-from-file=<file> [--pathspec-file-nul]] [<tree-ish>]
git reset (--patch | -p) [<tree-ish>] [--] [<pathspec>...]
+DEPRECATED: git reset [-q] [--stdin [-z]] [<tree-ish>]
DESCRIPTION
-----------
@@ -139,6 +140,16 @@ include::diff-context-options.adoc[]
+
For more details, see the 'pathspec' entry in linkgit:gitglossary[7].
+`--stdin`::
+ DEPRECATED (use `--pathspec-from-file=-` instead): Instead of taking
+ list of paths from the command line, read list of paths from the
+ standard input. Paths are separated by LF (i.e. one path per line) by
+ default.
+
+`-z`::
+ DEPRECATED (use `--pathspec-file-nul` instead): Only meaningful with
+ `--stdin`; paths are separated with NUL character instead of LF.
+
EXAMPLES
--------
diff --git a/Documentation/git-survey.adoc b/Documentation/git-survey.adoc
new file mode 100644
index 00000000000000..44f3a0568b7697
--- /dev/null
+++ b/Documentation/git-survey.adoc
@@ -0,0 +1,83 @@
+git-survey(1)
+=============
+
+NAME
+----
+git-survey - EXPERIMENTAL: Measure various repository dimensions of scale
+
+SYNOPSIS
+--------
+[verse]
+(EXPERIMENTAL!) 'git survey'
+
+DESCRIPTION
+-----------
+
+Survey the repository and measure various dimensions of scale.
+
+As repositories grow to "monorepo" size, certain data shapes can cause
+performance problems. `git-survey` attempts to measure and report on
+known problem areas.
+
+Ref Selection and Reachable Objects
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this first analysis phase, `git survey` will iterate over the set of
+requested branches, tags, and other refs and treewalk over all of the
+reachable commits, trees, and blobs and generate various statistics.
+
+OPTIONS
+-------
+
+--progress::
+ Show progress. This is automatically enabled when interactive.
+
+Ref Selection
+~~~~~~~~~~~~~
+
+The following options control the set of refs that `git survey` will examine.
+By default, `git survey` will look at tags, local branches, and remote refs.
+If any of the following options are given, the default set is cleared and
+only refs for the given options are added.
+
+--all-refs::
+ Use all refs. This includes local branches, tags, remote refs,
+ notes, and stashes. This option overrides all of the following.
+
+--branches::
+ Add local branches (`refs/heads/`) to the set.
+
+--tags::
+ Add tags (`refs/tags/`) to the set.
+
+--remotes::
+ Add remote branches (`refs/remotes/`) to the set.
+
+--detached::
+ Add HEAD to the set.
+
+--other::
+ Add notes (`refs/notes/`) and stashes (`refs/stash/`) to the set.
+
+OUTPUT
+------
+
+By default, `git survey` will print information about the repository in a
+human-readable format that includes overviews and tables.
+
+References Summary
+~~~~~~~~~~~~~~~~~~
+
+The references summary includes a count of each kind of reference,
+including branches, remote refs, and tags (split by "all" and
+"annotated").
+
+Reachable Object Summary
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The reachable object summary shows the total number of each kind of Git
+object, including tags, commits, trees, and blobs.
+
+GIT
+---
+Part of the linkgit:git[1] suite
diff --git a/Documentation/git-svn.adoc b/Documentation/git-svn.adoc
index c26c12bab37abf..047c412018adcc 100644
--- a/Documentation/git-svn.adoc
+++ b/Documentation/git-svn.adoc
@@ -9,6 +9,7 @@ SYNOPSIS
--------
[verse]
'git svn' [<command>] [<options>]
+(UNSUPPORTED!)
DESCRIPTION
-----------
diff --git a/Documentation/gitattributes.adoc b/Documentation/gitattributes.adoc
index f20041a323d174..7794bf0fd98dad 100644
--- a/Documentation/gitattributes.adoc
+++ b/Documentation/gitattributes.adoc
@@ -403,6 +403,36 @@ sign `$` upon checkout. Any byte sequence that begins with
with `$Id$` upon check-in.
+`symlink`
+^^^^^^^^^
+
+On Windows, symbolic links have a type: a "file symlink" must point at
+a file, and a "directory symlink" must point at a directory. If the
+type of symlink does not match its target, it doesn't work.
+
+Git does not record the type of symlink in the index or in a tree. On
+checkout it'll guess the type, which only works if the target exists
+at the time the symlink is created. This may often not be the case,
+for example when the link points at a directory inside a submodule.
+
+The `symlink` attribute allows you to explicitly set the type of symlink
+to `file` or `dir`, so Git doesn't have to guess. If you have a set of
+symlinks that point at other files, you can do:
+
+------------------------
+*.gif symlink=file
+------------------------
+
+To tell Git that a symlink points at a directory, use:
+
+------------------------
+tools_folder symlink=dir
+------------------------
+
+The `symlink` attribute is ignored on platforms other than Windows,
+since they don't distinguish between different types of symlinks.
+
+
`filter`
^^^^^^^^
diff --git a/Documentation/meson.build b/Documentation/meson.build
index d6365b888bbed3..17d437d8ded045 100644
--- a/Documentation/meson.build
+++ b/Documentation/meson.build
@@ -144,6 +144,7 @@ manpages = {
'git-status.adoc' : 1,
'git-stripspace.adoc' : 1,
'git-submodule.adoc' : 1,
+ 'git-survey.adoc' : 1,
'git-svn.adoc' : 1,
'git-switch.adoc' : 1,
'git-symbolic-ref.adoc' : 1,
diff --git a/Makefile b/Makefile
index dbf00220541ce1..c19d5c9cca6269 100644
--- a/Makefile
+++ b/Makefile
@@ -483,6 +483,11 @@ include shared.mak
#
# CURL_LDFLAGS=-lcurl
#
+# Define LAZYLOAD_LIBCURL to dynamically load the libcurl; This can be useful
+# if Multiple libcurl versions exist (with different file names) that link to
+# various SSL/TLS backends, to support the `http.sslBackend` runtime switch in
+# such a scenario.
+#
# === Optional library: libpcre2 ===
#
# Define USE_LIBPCRE if you have and want to use libpcre. Various
@@ -832,6 +837,7 @@ TEST_BUILTINS_OBJS += test-hash-speed.o
TEST_BUILTINS_OBJS += test-hash.o
TEST_BUILTINS_OBJS += test-hashmap.o
TEST_BUILTINS_OBJS += test-hexdump.o
+TEST_BUILTINS_OBJS += test-iconv.o
TEST_BUILTINS_OBJS += test-json-writer.o
TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
TEST_BUILTINS_OBJS += test-match-trees.o
@@ -1488,6 +1494,7 @@ BUILTIN_OBJS += builtin/sparse-checkout.o
BUILTIN_OBJS += builtin/stash.o
BUILTIN_OBJS += builtin/stripspace.o
BUILTIN_OBJS += builtin/submodule--helper.o
+BUILTIN_OBJS += builtin/survey.o
BUILTIN_OBJS += builtin/symbolic-ref.o
BUILTIN_OBJS += builtin/tag.o
BUILTIN_OBJS += builtin/unpack-file.o
@@ -1511,6 +1518,7 @@ BUILTIN_OBJS += builtin/write-tree.o
# upstream unnecessarily (making merging in future changes easier).
THIRD_PARTY_SOURCES += compat/inet_ntop.c
THIRD_PARTY_SOURCES += compat/inet_pton.c
+THIRD_PARTY_SOURCES += compat/mimalloc/%
THIRD_PARTY_SOURCES += compat/nedmalloc/%
THIRD_PARTY_SOURCES += compat/obstack.%
THIRD_PARTY_SOURCES += compat/poll/%
@@ -1527,6 +1535,7 @@ CLAR_TEST_SUITES += u-hash
CLAR_TEST_SUITES += u-hashmap
CLAR_TEST_SUITES += u-list-objects-filter-options
CLAR_TEST_SUITES += u-mem-pool
+CLAR_TEST_SUITES += u-mingw
CLAR_TEST_SUITES += u-oid-array
CLAR_TEST_SUITES += u-oidmap
CLAR_TEST_SUITES += u-oidtree
@@ -1788,10 +1797,23 @@ else
CURL_LIBCURL =
endif
- ifndef CURL_LDFLAGS
- CURL_LDFLAGS = $(eval CURL_LDFLAGS := $$(shell $$(CURL_CONFIG) --libs))$(CURL_LDFLAGS)
+ ifdef LAZYLOAD_LIBCURL
+ LAZYLOAD_LIBCURL_OBJ = compat/lazyload-curl.o
+ OBJECTS += $(LAZYLOAD_LIBCURL_OBJ)
+ # The `CURL_STATICLIB` constant must be defined to avoid seeing the functions
+ # declared as DLL imports
+ CURL_CFLAGS = -DCURL_STATICLIB
+ifneq ($(uname_S),MINGW)
+ifneq ($(uname_S),Windows)
+ CURL_LIBCURL = -ldl
+endif
+endif
+ else
+ ifndef CURL_LDFLAGS
+ CURL_LDFLAGS = $(eval CURL_LDFLAGS := $$(shell $$(CURL_CONFIG) --libs))$(CURL_LDFLAGS)
+ endif
+ CURL_LIBCURL += $(CURL_LDFLAGS)
endif
- CURL_LIBCURL += $(CURL_LDFLAGS)
ifndef CURL_CFLAGS
CURL_CFLAGS = $(eval CURL_CFLAGS := $$(shell $$(CURL_CONFIG) --cflags))$(CURL_CFLAGS)
@@ -1812,7 +1834,7 @@ else
endif
ifdef USE_CURL_FOR_IMAP_SEND
BASIC_CFLAGS += -DUSE_CURL_FOR_IMAP_SEND
- IMAP_SEND_BUILDDEPS = http.o
+ IMAP_SEND_BUILDDEPS = http.o $(LAZYLOAD_LIBCURL_OBJ)
IMAP_SEND_LDFLAGS += $(CURL_LIBCURL)
endif
ifndef NO_EXPAT
@@ -2277,6 +2299,46 @@ ifdef USE_NED_ALLOCATOR
OVERRIDE_STRDUP = YesPlease
endif
+ifdef USE_MIMALLOC
+ MIMALLOC_OBJS = \
+ compat/mimalloc/alloc-aligned.o \
+ compat/mimalloc/alloc.o \
+ compat/mimalloc/arena.o \
+ compat/mimalloc/bitmap.o \
+ compat/mimalloc/heap.o \
+ compat/mimalloc/init.o \
+ compat/mimalloc/libc.o \
+ compat/mimalloc/options.o \
+ compat/mimalloc/os.o \
+ compat/mimalloc/page.o \
+ compat/mimalloc/random.o \
+ compat/mimalloc/prim/prim.o \
+ compat/mimalloc/segment.o \
+ compat/mimalloc/segment-map.o \
+ compat/mimalloc/stats.o
+
+ COMPAT_CFLAGS += -Icompat/mimalloc -DMI_DEBUG=0 -DUSE_MIMALLOC --std=gnu11
+ COMPAT_OBJS += $(MIMALLOC_OBJS)
+
+$(MIMALLOC_OBJS): COMPAT_CFLAGS += -DBANNED_H
+
+$(MIMALLOC_OBJS): COMPAT_CFLAGS += \
+ -DMI_WIN_USE_FLS \
+ -Wno-attributes \
+ -Wno-unknown-pragmas \
+ -Wno-unused-function \
+ -Wno-array-bounds
+
+ifdef DEVELOPER
+$(MIMALLOC_OBJS): COMPAT_CFLAGS += \
+ -Wno-pedantic \
+ -Wno-declaration-after-statement \
+ -Wno-old-style-definition \
+ -Wno-missing-prototypes \
+ -Wno-implicit-function-declaration
+endif
+endif
+
ifdef OVERRIDE_STRDUP
COMPAT_CFLAGS += -DOVERRIDE_STRDUP
COMPAT_OBJS += compat/strdup.o
@@ -3006,10 +3068,10 @@ git-imap-send$X: imap-send.o $(IMAP_SEND_BUILDDEPS) GIT-LDFLAGS $(GITLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
$(IMAP_SEND_LDFLAGS) $(LIBS)
-git-http-fetch$X: http.o http-walker.o http-fetch.o GIT-LDFLAGS $(GITLIBS)
+git-http-fetch$X: http.o http-walker.o http-fetch.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
$(CURL_LIBCURL) $(LIBS)
-git-http-push$X: http.o http-push.o GIT-LDFLAGS $(GITLIBS)
+git-http-push$X: http.o http-push.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
@@ -3019,7 +3081,7 @@ $(REMOTE_CURL_ALIASES): $(REMOTE_CURL_PRIMARY)
ln -s $< $@ 2>/dev/null || \
cp $< $@
-$(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o GIT-LDFLAGS $(GITLIBS)
+$(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
@@ -3914,12 +3976,15 @@ ifdef MSVC
$(RM) $(patsubst %.o,%.o.pdb,$(OBJECTS))
$(RM) headless-git.o.pdb
$(RM) $(patsubst %.exe,%.pdb,$(OTHER_PROGRAMS))
+ $(RM) $(patsubst %.exe,%.ilk,$(OTHER_PROGRAMS))
$(RM) $(patsubst %.exe,%.iobj,$(OTHER_PROGRAMS))
$(RM) $(patsubst %.exe,%.ipdb,$(OTHER_PROGRAMS))
$(RM) $(patsubst %.exe,%.pdb,$(PROGRAMS))
+ $(RM) $(patsubst %.exe,%.ilk,$(PROGRAMS))
$(RM) $(patsubst %.exe,%.iobj,$(PROGRAMS))
$(RM) $(patsubst %.exe,%.ipdb,$(PROGRAMS))
$(RM) $(patsubst %.exe,%.pdb,$(TEST_PROGRAMS))
+ $(RM) $(patsubst %.exe,%.ilk,$(TEST_PROGRAMS))
$(RM) $(patsubst %.exe,%.iobj,$(TEST_PROGRAMS))
$(RM) $(patsubst %.exe,%.ipdb,$(TEST_PROGRAMS))
$(RM) compat/vcbuild/MSVC-DEFS-GEN
diff --git a/README.md b/README.md
index d87bca1b8c3ebf..026d5d85caef09 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,77 @@
-[](https://github.com/git/git/actions?query=branch%3Amaster+event%3Apush)
+Git for Windows
+===============
+
+[](CODE_OF_CONDUCT.md)
+[](https://open.vscode.dev/git-for-windows/git)
+[](https://github.com/git-for-windows/git/actions?query=branch%3Amain+event%3Apush)
+[](https://gitter.im/git-for-windows/git?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+This is [Git for Windows](http://git-for-windows.github.io/), the Windows port
+of [Git](http://git-scm.com/).
+
+The Git for Windows project is run using a [governance
+model](http://git-for-windows.github.io/governance-model.html). If you
+encounter problems, you can report them as [GitHub
+issues](https://github.com/git-for-windows/git/issues), discuss them in Git
+for Windows' [Discussions](https://github.com/git-for-windows/git/discussions)
+or on the [Git mailing list](mailto:git@vger.kernel.org), and [contribute bug
+fixes](https://gitforwindows.org/how-to-participate).
+
+To build Git for Windows, please either install [Git for Windows'
+SDK](https://gitforwindows.org/#download-sdk), start its `git-bash.exe`, `cd`
+to your Git worktree and run `make`, or open the Git worktree as a folder in
+Visual Studio.
+
+To verify that your build works, use one of the following methods:
+
+- If you want to test the built executables within Git for Windows' SDK,
+ prepend `/bin-wrappers` to the `PATH`.
+- Alternatively, run `make install` in the Git worktree.
+- If you need to test this in a full installer, run `sdk build
+ git-and-installer`.
+- You can also "install" Git into an existing portable Git via `make install
+ DESTDIR=<dir>` where `<dir>` refers to the top-level directory of the
+ portable Git. In this instance, you will want to prepend that portable Git's
+ `/cmd` directory to the `PATH`, or test by running that portable Git's
+ `git-bash.exe` or `git-cmd.exe`.
+- If you built using a recent Visual Studio, you can use the menu item
+ `Build>Install git` (you will want to click on `Project>CMake Settings for
+ Git` first, then click on `Edit JSON` and then point `installRoot` to the
+ `mingw64` directory of an already-unpacked portable Git).
+
+ As in the previous bullet point, you will then prepend `/cmd` to the `PATH`
+ or run using the portable Git's `git-bash.exe` or `git-cmd.exe`.
+- If you want to run the built executables in-place, but in a CMD instead of
+ inside a Bash, you can run a snippet like this in the `git-bash.exe` window
+ where Git was built (ensure that the `EOF` line has no leading spaces), and
+ then paste into the CMD window what was put in the clipboard:
+
+ ```sh
+ clip.exe <
(see https://subspace.kernel.org/subscribing.html for details). The mailing
list archives are available at <https://lore.kernel.org/git/>,
and other archival sites.
+The core git mailing list is plain text (no HTML!).
Issues which are security relevant should be disclosed privately to
the Git Security mailing list <git-security@googlegroups.com>.
diff --git a/SECURITY.md b/SECURITY.md
index c720c2ae7f9580..42b6d458bfd557 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -28,24 +28,38 @@ Examples for details to include:
## Supported Versions
-There are no official "Long Term Support" versions in Git.
-Instead, the maintenance track (i.e. the versions based on the
-most recently published feature release, also known as ".0"
-version) sees occasional updates with bug fixes.
-
-Fixes to vulnerabilities are made for the maintenance track for
-the latest feature release and merged up to the in-development
-branches. The Git project makes no formal guarantee for any
-older maintenance tracks to receive updates. In practice,
-though, critical vulnerability fixes are applied not only to the
-most recent track, but to at least a couple more maintenance
-tracks.
-
-This is typically done by making the fix on the oldest and still
-relevant maintenance track, and merging it upwards to newer and
-newer maintenance tracks.
-
-For example, v2.24.1 was released to address a couple of
-[CVEs](https://cve.mitre.org/), and at the same time v2.14.6,
-v2.15.4, v2.16.6, v2.17.3, v2.18.2, v2.19.3, v2.20.2, v2.21.1,
-v2.22.2 and v2.23.1 were released.
+Git for Windows is a "friendly fork" of [Git](https://git-scm.com/), i.e. changes in Git for Windows are frequently contributed back, and Git for Windows' release cycle closely follows Git's.
+
+While Git maintains several release trains (when v2.19.1 was released, there were updates to v2.14.x-v2.18.x, too, for example), Git for Windows follows only the latest Git release. For example, there is no Git for Windows release corresponding to Git v2.16.5 (which was released after v2.19.0).
+
+One exception is [MinGit for Windows](https://gitforwindows.org/mingit) (a minimal subset of Git for Windows, intended for bundling with third-party applications that do not need any interactive commands nor support for `git svn`): critical security fixes are backported to the v2.11.x, v2.14.x, v2.19.x, v2.21.x and v2.23.x release trains.
+
+## Version number scheme
+
+The Git for Windows versions reflect the Git version on which they are based. For example, Git for Windows v2.21.0 is based on Git v2.21.0.
+
+As Git for Windows bundles more than just Git (such as Bash, OpenSSL, OpenSSH, GNU Privacy Guard), sometimes there are interim releases without corresponding Git releases. In these cases, Git for Windows appends a number in parentheses, starting with the number 2, then 3, etc. For example, both Git for Windows v2.17.1 and v2.17.1(2) were based on Git v2.17.1, but the latter included updates for Git Credential Manager and Git LFS, fixing critical regressions.
+
+## Tag naming scheme
+
+Every Git for Windows version is tagged using a name that starts with the Git version on which it is based, with the suffix `.windows.` appended. For example, Git for Windows v2.17.1's source code is tagged as [`v2.17.1.windows.1`](https://github.com/git-for-windows/git/releases/tag/v2.17.1.windows.1) (the patch level is always at least 1, given that Git for Windows always has patches on top of Git). Likewise, Git for Windows v2.17.1(2)'s source code is tagged as [`v2.17.1.windows.2`](https://github.com/git-for-windows/git/releases/tag/v2.17.1.windows.2).
+
+## Release Candidate (rc) versions
+
+As a friendly fork of Git (the "upstream" project), Git for Windows is closely correlated to that project.
+
+Consequently, Git for Windows publishes versions based on Git's release candidates (for upcoming "`.0`" versions, see [Git's release schedule](https://tinyurl.com/gitCal)). These versions end in `-rc`, starting with `-rc0` for a very early preview of what is to come, and as with regular versions, Git for Windows tries to follow Git's releases as quickly as possible.
+
+Note: there is currently a bug in the "Check daily for updates" code, where it mistakes the final version as a downgrade from release candidates. Example: if you installed Git for Windows v2.23.0-rc3 and enabled the auto-updater, it would ask you whether you want to "downgrade" to v2.23.0 when that version was available.
+
+[All releases](https://github.com/git-for-windows/git/releases/), including release candidates, are listed via a link at the footer of the [Git for Windows](https://gitforwindows.org/) home page.
+
+## Snapshot versions ('nightly builds')
+
+Git for Windows also provides snapshots (these are not releases) of the current development as per git-for-windows/git's `master` branch at the [Snapshots](https://gitforwindows.org/git-snapshots/) page. This link is also listed in the footer of the [Git for Windows](https://gitforwindows.org/) home page.
+
+Note: even if those builds are not exactly "nightly", they are sometimes referred to as "nightly builds" to keep with other projects' nomenclature.
+
+## Following upstream's developments
+
+The [gitforwindows/git repository](https://github.com/git-for-windows/git) also provides the `shears/*` branches. The `shears/*` branches reflect Git for Windows' patches, rebased onto the upstream integration branches, [updated (mostly) via automated CI builds](https://dev.azure.com/git-for-windows/git/_build?definitionId=25).
diff --git a/abspath.c b/abspath.c
index 1202cde23dbc9b..0c17e98654e4b0 100644
--- a/abspath.c
+++ b/abspath.c
@@ -93,6 +93,9 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
goto error_out;
}
+ if (platform_strbuf_realpath(resolved, path))
+ return resolved->buf;
+
strbuf_addstr(&remaining, path);
get_root_part(resolved, &remaining);
diff --git a/advice.c b/advice.c
index 0018501b7bc103..71ddedd4ad46bb 100644
--- a/advice.c
+++ b/advice.c
@@ -61,6 +61,7 @@ static struct {
[ADVICE_IGNORED_HOOK] = { "ignoredHook" },
[ADVICE_IMPLICIT_IDENTITY] = { "implicitIdentity" },
[ADVICE_MERGE_CONFLICT] = { "mergeConflict" },
+ [ADVICE_NAME_TOO_LONG] = { "nameTooLong" },
[ADVICE_NESTED_TAG] = { "nestedTag" },
[ADVICE_OBJECT_NAME_WARNING] = { "objectNameWarning" },
[ADVICE_PUSH_ALREADY_EXISTS] = { "pushAlreadyExists" },
@@ -89,6 +90,7 @@ static struct {
[ADVICE_SUBMODULE_MERGE_CONFLICT] = { "submoduleMergeConflict" },
[ADVICE_SUGGEST_DETACHING_HEAD] = { "suggestDetachingHead" },
[ADVICE_UPDATE_SPARSE_PATH] = { "updateSparsePath" },
+ [ADVICE_USE_CORE_FSMONITOR_CONFIG] = { "useCoreFSMonitorConfig" },
[ADVICE_WAITING_FOR_EDITOR] = { "waitingForEditor" },
[ADVICE_WORKTREE_ADD_ORPHAN] = { "worktreeAddOrphan" },
};
diff --git a/advice.h b/advice.h
index 8def28068861df..849a5991379c11 100644
--- a/advice.h
+++ b/advice.h
@@ -28,6 +28,7 @@ enum advice_type {
ADVICE_IGNORED_HOOK,
ADVICE_IMPLICIT_IDENTITY,
ADVICE_MERGE_CONFLICT,
+ ADVICE_NAME_TOO_LONG,
ADVICE_NESTED_TAG,
ADVICE_OBJECT_NAME_WARNING,
ADVICE_PUSH_ALREADY_EXISTS,
@@ -56,6 +57,7 @@ enum advice_type {
ADVICE_SUBMODULE_MERGE_CONFLICT,
ADVICE_SUGGEST_DETACHING_HEAD,
ADVICE_UPDATE_SPARSE_PATH,
+ ADVICE_USE_CORE_FSMONITOR_CONFIG,
ADVICE_WAITING_FOR_EDITOR,
ADVICE_WORKTREE_ADD_ORPHAN,
};
diff --git a/apply.c b/apply.c
index 63d5e3c4609c11..3340abdf8994c4 100644
--- a/apply.c
+++ b/apply.c
@@ -4507,7 +4507,7 @@ static int try_create_file(struct apply_state *state, const char *path,
/* Although buf:size is counted string, it also is NUL
* terminated.
*/
- return !!symlink(buf, path);
+ return !!create_symlink(state && state->repo ? state->repo->index : NULL, buf, path);
fd = open(path, O_CREAT | O_EXCL | O_WRONLY, (mode & 0100) ? 0777 : 0666);
if (fd < 0)
diff --git a/builtin.h b/builtin.h
index 235c51f30e5380..85aa3e62892fb5 100644
--- a/builtin.h
+++ b/builtin.h
@@ -259,6 +259,7 @@ int cmd_sparse_checkout(int argc, const char **argv, const char *prefix, struct
int cmd_status(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_stash(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_stripspace(int argc, const char **argv, const char *prefix, struct repository *repo);
+int cmd_survey(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_submodule__helper(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_switch(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_symbolic_ref(int argc, const char **argv, const char *prefix, struct repository *repo);
diff --git a/builtin/add.c b/builtin/add.c
index 7737ab878bfceb..95e7267b017cbd 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -497,6 +497,10 @@ int cmd_add(int argc,
die_in_unpopulated_submodule(repo->index, prefix);
die_path_inside_submodule(repo->index, &pathspec);
+ enable_fscache(0);
+ /* We do not really re-read the index but update the up-to-date flags */
+ preload_index(repo->index, &pathspec, 0);
+
if (add_new_files) {
int baselen;
@@ -609,5 +613,6 @@ int cmd_add(int argc,
free(ps_matched);
dir_clear(&dir);
clear_pathspec(&pathspec);
+ enable_fscache(0);
return exit_status;
}
diff --git a/builtin/checkout.c b/builtin/checkout.c
index e031e6188613a6..c46405c0e39ed1 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -401,6 +401,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
if (pc_workers > 1)
init_parallel_checkout();
+ enable_fscache(the_repository->index->cache_nr);
for (pos = 0; pos < the_repository->index->cache_nr; pos++) {
struct cache_entry *ce = the_repository->index->cache[pos];
if (ce->ce_flags & CE_MATCHED) {
@@ -426,6 +427,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
NULL, NULL);
mem_pool_discard(&ce_mem_pool, should_validate_cache_entries());
+ disable_fscache();
remove_marked_cache_entries(the_repository->index, 1);
remove_scheduled_dirs();
errs |= finish_delayed_checkout(&state, opts->show_progress);
diff --git a/builtin/clean.c b/builtin/clean.c
index 1d5e7e5366bf09..f8a54a4a47bc7b 100644
--- a/builtin/clean.c
+++ b/builtin/clean.c
@@ -26,6 +26,7 @@
#include "pathspec.h"
#include "help.h"
#include "prompt.h"
+#include "advice.h"
static int require_force = -1; /* unset */
static int interactive;
@@ -41,6 +42,10 @@ static const char *msg_remove = N_("Removing %s\n");
static const char *msg_would_remove = N_("Would remove %s\n");
static const char *msg_skip_git_dir = N_("Skipping repository %s\n");
static const char *msg_would_skip_git_dir = N_("Would skip repository %s\n");
+#ifndef CAN_UNLINK_MOUNT_POINTS
+static const char *msg_skip_mount_point = N_("Skipping mount point %s\n");
+static const char *msg_would_skip_mount_point = N_("Would skip mount point %s\n");
+#endif
static const char *msg_warn_remove_failed = N_("failed to remove %s");
static const char *msg_warn_lstat_failed = N_("could not lstat %s\n");
static const char *msg_skip_cwd = N_("Refusing to remove current working directory\n");
@@ -185,6 +190,29 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
goto out;
}
+ if (is_mount_point(path)) {
+#ifndef CAN_UNLINK_MOUNT_POINTS
+ if (!quiet) {
+ quote_path(path->buf, prefix, &quoted, 0);
+ printf(dry_run ?
+ _(msg_would_skip_mount_point) :
+ _(msg_skip_mount_point), quoted.buf);
+ }
+ *dir_gone = 0;
+#else
+ if (!dry_run && unlink(path->buf)) {
+ int saved_errno = errno;
+ quote_path(path->buf, prefix, &quoted, 0);
+ errno = saved_errno;
+ warning_errno(_(msg_warn_remove_failed), quoted.buf);
+ *dir_gone = 0;
+ ret = -1;
+ }
+#endif
+
+ goto out;
+ }
+
dir = opendir(path->buf);
if (!dir) {
/* an empty dir could be removed even if it is unreadble */
@@ -194,6 +222,9 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
quote_path(path->buf, prefix, &quoted, 0);
errno = saved_errno;
warning_errno(_(msg_warn_remove_failed), quoted.buf);
+ if (saved_errno == ENAMETOOLONG) {
+ advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+ }
*dir_gone = 0;
}
ret = res;
@@ -229,6 +260,9 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
quote_path(path->buf, prefix, &quoted, 0);
errno = saved_errno;
warning_errno(_(msg_warn_remove_failed), quoted.buf);
+ if (saved_errno == ENAMETOOLONG) {
+ advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+ }
*dir_gone = 0;
ret = 1;
}
@@ -272,6 +306,9 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
quote_path(path->buf, prefix, &quoted, 0);
errno = saved_errno;
warning_errno(_(msg_warn_remove_failed), quoted.buf);
+ if (saved_errno == ENAMETOOLONG) {
+ advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+ }
*dir_gone = 0;
ret = 1;
}
@@ -1015,6 +1052,7 @@ int cmd_clean(int argc,
if (repo_read_index(the_repository) < 0)
die(_("index file corrupt"));
+ enable_fscache(the_repository->index->cache_nr);
pl = add_pattern_list(&dir, EXC_CMDL, "--exclude option");
for (i = 0; i < exclude_list.nr; i++)
@@ -1081,6 +1119,9 @@ int cmd_clean(int argc,
qname = quote_path(item->string, NULL, &buf, 0);
errno = saved_errno;
warning_errno(_(msg_warn_remove_failed), qname);
+ if (saved_errno == ENAMETOOLONG) {
+ advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+ }
errors++;
} else if (!quiet) {
qname = quote_path(item->string, NULL, &buf, 0);
@@ -1089,6 +1130,7 @@ int cmd_clean(int argc,
}
}
+ disable_fscache();
strbuf_release(&abs_path);
strbuf_release(&buf);
string_list_clear(&del_list, 0);
diff --git a/builtin/commit.c b/builtin/commit.c
index a3e52ac9ca6607..04adf2ef9dd801 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -1623,6 +1623,7 @@ struct repository *repo UNUSED)
PATHSPEC_PREFER_FULL,
prefix, argv);
+ enable_fscache(0);
if (status_format != STATUS_FORMAT_PORCELAIN &&
status_format != STATUS_FORMAT_PORCELAIN_V2)
progress_flag = REFRESH_PROGRESS;
@@ -1663,6 +1664,7 @@ struct repository *repo UNUSED)
wt_status_print(&s);
wt_status_collect_free_buffers(&s);
+ disable_fscache();
return 0;
}
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
index 7f733cb756e03c..3b8130d3d64f9c 100644
--- a/builtin/credential-cache.c
+++ b/builtin/credential-cache.c
@@ -23,7 +23,7 @@ static int connection_closed(int error)
static int connection_fatally_broken(int error)
{
- return (error != ENOENT) && (error != ENETDOWN);
+ return (error != ENOENT) && (error != ENETDOWN) && (error != ECONNREFUSED);
}
#else
diff --git a/builtin/difftool.c b/builtin/difftool.c
index e4bc1f831696a8..8d10e2489f088e 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -544,7 +544,7 @@ static int run_dir_diff(struct repository *repo,
}
add_path(&wtdir, wtdir_len, dst_path);
if (dt_options->symlinks) {
- if (symlink(wtdir.buf, rdir.buf)) {
+ if (create_symlink(lstate.istate, wtdir.buf, rdir.buf)) {
ret = error_errno("could not symlink '%s' to '%s'", wtdir.buf, rdir.buf);
goto finish;
}
diff --git a/builtin/reset.c b/builtin/reset.c
index 3590be57a5f03c..1cd7e61fe45e90 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -38,6 +38,8 @@
#include "trace2.h"
#include "dir.h"
#include "add-interactive.h"
+#include "strbuf.h"
+#include "quote.h"
#define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000)
@@ -46,6 +48,7 @@ static const char * const git_reset_usage[] = {
N_("git reset [-q] [<tree-ish>] [--] <pathspec>..."),
N_("git reset [-q] [--pathspec-from-file [--pathspec-file-nul]] [<tree-ish>]"),
N_("git reset --patch [<tree-ish>] [--] [<pathspec>...]"),
+ N_("DEPRECATED: git reset [-q] [--stdin [-z]] [<tree-ish>]"),
NULL
};
@@ -347,6 +350,7 @@ int cmd_reset(int argc,
struct pathspec pathspec;
int intent_to_add = 0;
struct interactive_options interactive_opts = INTERACTIVE_OPTIONS_INIT;
+ int nul_term_line = 0, read_from_stdin = 0;
const struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
OPT_BOOL(0, "no-refresh", &no_refresh,
@@ -379,6 +383,10 @@ int cmd_reset(int argc,
N_("record only the fact that removed paths will be added later")),
OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
+ OPT_BOOL('z', NULL, &nul_term_line,
+ N_("DEPRECATED (use --pathspec-file-nul instead): paths are separated with NUL character")),
+ OPT_BOOL(0, "stdin", &read_from_stdin,
+ N_("DEPRECATED (use --pathspec-from-file=- instead): read paths from <stdin>")),
OPT_END()
};
@@ -388,6 +396,14 @@ int cmd_reset(int argc,
PARSE_OPT_KEEP_DASHDASH);
parse_args(&pathspec, argv, prefix, patch_mode, &rev);
+ if (read_from_stdin) {
+ warning(_("--stdin is deprecated, please use --pathspec-from-file=- instead"));
+ free(pathspec_from_file);
+ pathspec_from_file = xstrdup("-");
+ if (nul_term_line)
+ pathspec_file_nul = 1;
+ }
+
if (pathspec_from_file) {
if (patch_mode)
die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--patch");
diff --git a/builtin/survey.c b/builtin/survey.c
new file mode 100644
index 00000000000000..f40905fb2fd57a
--- /dev/null
+++ b/builtin/survey.c
@@ -0,0 +1,934 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
+#include "builtin.h"
+#include "config.h"
+#include "environment.h"
+#include "hex.h"
+#include "object.h"
+#include "odb.h"
+#include "object-name.h"
+#include "parse-options.h"
+#include "path-walk.h"
+#include "progress.h"
+#include "ref-filter.h"
+#include "refs.h"
+#include "revision.h"
+#include "strbuf.h"
+#include "strvec.h"
+#include "tag.h"
+#include "trace2.h"
+#include "color.h"
+
+static const char * const survey_usage[] = {
+ N_("(EXPERIMENTAL!) git survey <options>"),
+ NULL,
+};
+
+struct survey_refs_wanted {
+ int want_all_refs; /* special override */
+
+ int want_branches;
+ int want_tags;
+ int want_remotes;
+ int want_detached;
+ int want_other; /* see FILTER_REFS_OTHERS -- refs/notes/, refs/stash/ */
+};
+
+static struct survey_refs_wanted default_ref_options = {
+ .want_all_refs = 1,
+};
+
+struct survey_opts {
+ int verbose;
+ int show_progress;
+ int top_nr;
+ struct survey_refs_wanted refs;
+};
+
+struct survey_report_ref_summary {
+ size_t refs_nr;
+ size_t branches_nr;
+ size_t remote_refs_nr;
+ size_t tags_nr;
+ size_t tags_annotated_nr;
+ size_t others_nr;
+ size_t unknown_nr;
+};
+
+struct survey_report_object_summary {
+ size_t commits_nr;
+ size_t tags_nr;
+ size_t trees_nr;
+ size_t blobs_nr;
+};
+
+/**
+ * For some category given by 'label', count the number of objects
+ * that match that label along with the on-disk size and the size
+ * after decompressing (both with delta bases and zlib).
+ */
+struct survey_report_object_size_summary {
+ char *label;
+ size_t nr;
+ size_t disk_size;
+ size_t inflated_size;
+ size_t num_missing;
+};
+
+typedef int (*survey_top_cmp)(void *v1, void *v2);
+
+static int cmp_by_nr(void *v1, void *v2)
+{
+ struct survey_report_object_size_summary *s1 = v1;
+ struct survey_report_object_size_summary *s2 = v2;
+
+ if (s1->nr < s2->nr)
+ return -1;
+ if (s1->nr > s2->nr)
+ return 1;
+ return 0;
+}
+
+static int cmp_by_disk_size(void *v1, void *v2)
+{
+ struct survey_report_object_size_summary *s1 = v1;
+ struct survey_report_object_size_summary *s2 = v2;
+
+ if (s1->disk_size < s2->disk_size)
+ return -1;
+ if (s1->disk_size > s2->disk_size)
+ return 1;
+ return 0;
+}
+
+static int cmp_by_inflated_size(void *v1, void *v2)
+{
+ struct survey_report_object_size_summary *s1 = v1;
+ struct survey_report_object_size_summary *s2 = v2;
+
+ if (s1->inflated_size < s2->inflated_size)
+ return -1;
+ if (s1->inflated_size > s2->inflated_size)
+ return 1;
+ return 0;
+}
+
+/**
+ * Store a list of "top" categories by some sorting function. When
+ * inserting a new category, reorder the list and free the one that
+ * got ejected (if any).
+ */
+struct survey_report_top_table {
+ const char *name;
+ survey_top_cmp cmp_fn;
+ size_t nr;
+ size_t alloc;
+
+ /**
+ * 'data' stores an array of structs and must be cast into
+ * the proper array type before evaluating an index.
+ */
+ void *data;
+};
+
+static void init_top_sizes(struct survey_report_top_table *top,
+ size_t limit, const char *name,
+ survey_top_cmp cmp)
+{
+ struct survey_report_object_size_summary *sz_array;
+
+ top->name = name;
+ top->cmp_fn = cmp;
+ top->alloc = limit;
+ top->nr = 0;
+
+ CALLOC_ARRAY(sz_array, limit);
+ top->data = sz_array;
+}
+
+MAYBE_UNUSED
+static void clear_top_sizes(struct survey_report_top_table *top)
+{
+ struct survey_report_object_size_summary *sz_array = top->data;
+
+ for (size_t i = 0; i < top->nr; i++)
+ free(sz_array[i].label);
+ free(sz_array);
+}
+
+static void maybe_insert_into_top_size(struct survey_report_top_table *top,
+ struct survey_report_object_size_summary *summary)
+{
+ struct survey_report_object_size_summary *sz_array = top->data;
+ size_t pos = top->nr;
+
+ /* Compare against list from the bottom. */
+ while (pos > 0 && top->cmp_fn(&sz_array[pos - 1], summary) < 0)
+ pos--;
+
+ /* Not big enough! */
+ if (pos >= top->alloc)
+ return;
+
+ /* We need to shift the data. */
+ if (top->nr == top->alloc)
+ free(sz_array[top->nr - 1].label);
+ else
+ top->nr++;
+
+ for (size_t i = top->nr - 1; i > pos; i--)
+ memcpy(&sz_array[i], &sz_array[i - 1], sizeof(*sz_array));
+
+ memcpy(&sz_array[pos], summary, sizeof(*summary));
+ sz_array[pos].label = xstrdup(summary->label);
+}
+
+/**
+ * This struct contains all of the information that needs to be printed
+ * at the end of the exploration of the repository and its references.
+ */
+struct survey_report {
+ struct survey_report_ref_summary refs;
+ struct survey_report_object_summary reachable_objects;
+
+ struct survey_report_object_size_summary *by_type;
+
+ struct survey_report_top_table *top_paths_by_count;
+ struct survey_report_top_table *top_paths_by_disk;
+ struct survey_report_top_table *top_paths_by_inflate;
+};
+
+#define REPORT_TYPE_COMMIT 0
+#define REPORT_TYPE_TREE 1
+#define REPORT_TYPE_BLOB 2
+#define REPORT_TYPE_TAG 3
+#define REPORT_TYPE_COUNT 4
+
+struct survey_context {
+ struct repository *repo;
+
+ /* Options that control what is done. */
+ struct survey_opts opts;
+
+ /* Info for output only. */
+ struct survey_report report;
+
+ /*
+ * The rest of the members are about enabling the activity
+ * of the 'git survey' command, including ref listings, object
+ * pointers, and progress.
+ */
+
+ struct progress *progress;
+ size_t progress_nr;
+ size_t progress_total;
+
+ struct strvec refs;
+ struct ref_array ref_array;
+};
+
+static void clear_survey_context(struct survey_context *ctx)
+{
+ ref_array_clear(&ctx->ref_array);
+ strvec_clear(&ctx->refs);
+}
+
+struct survey_table {
+ const char *table_name;
+ struct strvec header;
+ struct strvec *rows;
+ size_t rows_nr;
+ size_t rows_alloc;
+};
+
+#define SURVEY_TABLE_INIT { \
+ .header = STRVEC_INIT, \
+}
+
+static void clear_table(struct survey_table *table)
+{
+ strvec_clear(&table->header);
+ for (size_t i = 0; i < table->rows_nr; i++)
+ strvec_clear(&table->rows[i]);
+ free(table->rows);
+}
+
+static void insert_table_rowv(struct survey_table *table, ...)
+{
+ va_list ap;
+ char *arg;
+ ALLOC_GROW(table->rows, table->rows_nr + 1, table->rows_alloc);
+
+ memset(&table->rows[table->rows_nr], 0, sizeof(struct strvec));
+
+ va_start(ap, table);
+ while ((arg = va_arg(ap, char *)))
+ strvec_push(&table->rows[table->rows_nr], arg);
+ va_end(ap);
+
+ table->rows_nr++;
+}
+
+#define SECTION_SEGMENT "========================================"
+#define SECTION_SEGMENT_LEN 40
+static const char *section_line = SECTION_SEGMENT
+ SECTION_SEGMENT
+ SECTION_SEGMENT
+ SECTION_SEGMENT;
+static const size_t section_len = 4 * SECTION_SEGMENT_LEN;
+
+static void print_table_title(const char *name, size_t *widths, size_t nr)
+{
+ size_t width = 3 * (nr - 1);
+ size_t min_width = strlen(name);
+
+ for (size_t i = 0; i < nr; i++)
+ width += widths[i];
+
+ if (width < min_width)
+ width = min_width;
+
+ if (width > section_len)
+ width = section_len;
+
+ printf("\n%s\n%.*s\n", name, (int)width, section_line);
+}
+
+static void print_row_plaintext(struct strvec *row, size_t *widths)
+{
+ static struct strbuf line = STRBUF_INIT;
+ strbuf_setlen(&line, 0);
+
+ for (size_t i = 0; i < row->nr; i++) {
+ const char *str = row->v[i];
+ size_t len = strlen(str);
+ if (i)
+ strbuf_add(&line, " | ", 3);
+ strbuf_addchars(&line, ' ', widths[i] - len);
+ strbuf_add(&line, str, len);
+ }
+ printf("%s\n", line.buf);
+}
+
+static void print_divider_plaintext(size_t *widths, size_t nr)
+{
+ static struct strbuf line = STRBUF_INIT;
+ strbuf_setlen(&line, 0);
+
+ for (size_t i = 0; i < nr; i++) {
+ if (i)
+ strbuf_add(&line, "-+-", 3);
+ strbuf_addchars(&line, '-', widths[i]);
+ }
+ printf("%s\n", line.buf);
+}
+
+static void print_table_plaintext(struct survey_table *table)
+{
+ size_t *column_widths;
+ size_t columns_nr = table->header.nr;
+ CALLOC_ARRAY(column_widths, columns_nr);
+
+ for (size_t i = 0; i < columns_nr; i++) {
+ column_widths[i] = strlen(table->header.v[i]);
+
+ for (size_t j = 0; j < table->rows_nr; j++) {
+ size_t rowlen = strlen(table->rows[j].v[i]);
+ if (column_widths[i] < rowlen)
+ column_widths[i] = rowlen;
+ }
+ }
+
+ print_table_title(table->table_name, column_widths, columns_nr);
+ print_row_plaintext(&table->header, column_widths);
+ print_divider_plaintext(column_widths, columns_nr);
+
+ for (size_t j = 0; j < table->rows_nr; j++)
+ print_row_plaintext(&table->rows[j], column_widths);
+
+ free(column_widths);
+}
+
+static void survey_report_plaintext_refs(struct survey_context *ctx)
+{
+ struct survey_report_ref_summary *refs = &ctx->report.refs;
+ struct survey_table table = SURVEY_TABLE_INIT;
+
+ table.table_name = _("REFERENCES SUMMARY");
+
+ strvec_push(&table.header, _("Ref Type"));
+ strvec_push(&table.header, _("Count"));
+
+ if (ctx->opts.refs.want_all_refs || ctx->opts.refs.want_branches) {
+ char *fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->branches_nr);
+ insert_table_rowv(&table, _("Branches"), fmt, NULL);
+ free(fmt);
+ }
+
+ if (ctx->opts.refs.want_all_refs || ctx->opts.refs.want_remotes) {
+ char *fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->remote_refs_nr);
+ insert_table_rowv(&table, _("Remote refs"), fmt, NULL);
+ free(fmt);
+ }
+
+ if (ctx->opts.refs.want_all_refs || ctx->opts.refs.want_tags) {
+ char *fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->tags_nr);
+ insert_table_rowv(&table, _("Tags (all)"), fmt, NULL);
+ free(fmt);
+ fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->tags_annotated_nr);
+ insert_table_rowv(&table, _("Tags (annotated)"), fmt, NULL);
+ free(fmt);
+ }
+
+ print_table_plaintext(&table);
+ clear_table(&table);
+}
+
+static void survey_report_plaintext_reachable_object_summary(struct survey_context *ctx)
+{
+ struct survey_report_object_summary *objs = &ctx->report.reachable_objects;
+ struct survey_table table = SURVEY_TABLE_INIT;
+ char *fmt;
+
+ table.table_name = _("REACHABLE OBJECT SUMMARY");
+
+ strvec_push(&table.header, _("Object Type"));
+ strvec_push(&table.header, _("Count"));
+
+ fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)objs->tags_nr);
+ insert_table_rowv(&table, _("Tags"), fmt, NULL);
+ free(fmt);
+
+ fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)objs->commits_nr);
+ insert_table_rowv(&table, _("Commits"), fmt, NULL);
+ free(fmt);
+
+ fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)objs->trees_nr);
+ insert_table_rowv(&table, _("Trees"), fmt, NULL);
+ free(fmt);
+
+ fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)objs->blobs_nr);
+ insert_table_rowv(&table, _("Blobs"), fmt, NULL);
+ free(fmt);
+
+ print_table_plaintext(&table);
+ clear_table(&table);
+}
+
+static void survey_report_object_sizes(const char *title,
+ const char *categories,
+ struct survey_report_object_size_summary *summary,
+ size_t summary_nr)
+{
+ struct survey_table table = SURVEY_TABLE_INIT;
+ table.table_name = title;
+
+ strvec_push(&table.header, categories);
+ strvec_push(&table.header, _("Count"));
+ strvec_push(&table.header, _("Disk Size"));
+ strvec_push(&table.header, _("Inflated Size"));
+
+ for (size_t i = 0; i < summary_nr; i++) {
+ char *label_str = xstrdup(summary[i].label);
+ char *nr_str = xstrfmt("%"PRIuMAX, (uintmax_t)summary[i].nr);
+ char *disk_str = xstrfmt("%"PRIuMAX, (uintmax_t)summary[i].disk_size);
+ char *inflate_str = xstrfmt("%"PRIuMAX, (uintmax_t)summary[i].inflated_size);
+
+ insert_table_rowv(&table, label_str, nr_str,
+ disk_str, inflate_str, NULL);
+
+ free(label_str);
+ free(nr_str);
+ free(disk_str);
+ free(inflate_str);
+ }
+
+ print_table_plaintext(&table);
+ clear_table(&table);
+}
+
+static void survey_report_plaintext_sorted_size(
+ struct survey_report_top_table *top)
+{
+ survey_report_object_sizes(top->name, _("Path"),
+ top->data, top->nr);
+}
+
+static void survey_report_plaintext(struct survey_context *ctx)
+{
+ printf("GIT SURVEY for \"%s\"\n", ctx->repo->worktree);
+ printf("-----------------------------------------------------\n");
+ survey_report_plaintext_refs(ctx);
+ survey_report_plaintext_reachable_object_summary(ctx);
+ survey_report_object_sizes(_("TOTAL OBJECT SIZES BY TYPE"),
+ _("Object Type"),
+ ctx->report.by_type,
+ REPORT_TYPE_COUNT);
+
+ survey_report_plaintext_sorted_size(
+ &ctx->report.top_paths_by_count[REPORT_TYPE_TREE]);
+ survey_report_plaintext_sorted_size(
+ &ctx->report.top_paths_by_count[REPORT_TYPE_BLOB]);
+
+ survey_report_plaintext_sorted_size(
+ &ctx->report.top_paths_by_disk[REPORT_TYPE_TREE]);
+ survey_report_plaintext_sorted_size(
+ &ctx->report.top_paths_by_disk[REPORT_TYPE_BLOB]);
+
+ survey_report_plaintext_sorted_size(
+ &ctx->report.top_paths_by_inflate[REPORT_TYPE_TREE]);
+ survey_report_plaintext_sorted_size(
+ &ctx->report.top_paths_by_inflate[REPORT_TYPE_BLOB]);
+}
+
+/*
+ * After parsing the command line arguments, figure out which refs we
+ * should scan.
+ *
+ * If ANY were given in positive sense, then we ONLY include them and
+ * do not use the builtin values.
+ */
+static void fixup_refs_wanted(struct survey_context *ctx)
+{
+ struct survey_refs_wanted *rw = &ctx->opts.refs;
+
+ /*
+ * `--all-refs` overrides and enables everything.
+ */
+ if (rw->want_all_refs == 1) {
+ rw->want_branches = 1;
+ rw->want_tags = 1;
+ rw->want_remotes = 1;
+ rw->want_detached = 1;
+ rw->want_other = 1;
+ return;
+ }
+
+ /*
+ * If none of the `--<ref-type>` options were given, we assume all
+ * of the builtin unspecified values.
+ */
+ if (rw->want_branches == -1 &&
+ rw->want_tags == -1 &&
+ rw->want_remotes == -1 &&
+ rw->want_detached == -1 &&
+ rw->want_other == -1) {
+ *rw = default_ref_options;
+ return;
+ }
+
+ /*
+ * Since we only allow positive boolean values on the command
+ * line, we will only have true values where they specified
+ * a `--<ref-type>`.
+ *
+ * So anything that still has an unspecified value should be
+ * set to false.
+ */
+ if (rw->want_branches == -1)
+ rw->want_branches = 0;
+ if (rw->want_tags == -1)
+ rw->want_tags = 0;
+ if (rw->want_remotes == -1)
+ rw->want_remotes = 0;
+ if (rw->want_detached == -1)
+ rw->want_detached = 0;
+ if (rw->want_other == -1)
+ rw->want_other = 0;
+}
+
+static int survey_load_config_cb(const char *var, const char *value,
+ const struct config_context *cctx, void *pvoid)
+{
+ struct survey_context *ctx = pvoid;
+
+ if (!strcmp(var, "survey.verbose")) {
+ ctx->opts.verbose = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "survey.progress")) {
+ ctx->opts.show_progress = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "survey.top")) {
+ ctx->opts.top_nr = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_default_config(var, value, cctx, pvoid);
+}
+
+static void survey_load_config(struct survey_context *ctx)
+{
+ repo_config(the_repository, survey_load_config_cb, ctx);
+}
+
+static void do_load_refs(struct survey_context *ctx,
+ struct ref_array *ref_array)
+{
+ struct ref_filter filter = REF_FILTER_INIT;
+ struct ref_sorting *sorting;
+ struct string_list sorting_options = STRING_LIST_INIT_DUP;
+
+ string_list_append(&sorting_options, "objectname");
+ sorting = ref_sorting_options(&sorting_options);
+
+ if (ctx->opts.refs.want_detached)
+ strvec_push(&ctx->refs, "HEAD");
+
+ if (ctx->opts.refs.want_all_refs) {
+ strvec_push(&ctx->refs, "refs/");
+ } else {
+ if (ctx->opts.refs.want_branches)
+ strvec_push(&ctx->refs, "refs/heads/");
+ if (ctx->opts.refs.want_tags)
+ strvec_push(&ctx->refs, "refs/tags/");
+ if (ctx->opts.refs.want_remotes)
+ strvec_push(&ctx->refs, "refs/remotes/");
+ if (ctx->opts.refs.want_other) {
+ strvec_push(&ctx->refs, "refs/notes/");
+ strvec_push(&ctx->refs, "refs/stash/");
+ }
+ }
+
+ filter.name_patterns = ctx->refs.v;
+ filter.ignore_case = 0;
+ filter.match_as_path = 1;
+
+ if (ctx->opts.show_progress) {
+ ctx->progress_total = 0;
+ ctx->progress = start_progress(ctx->repo,
+ _("Scanning refs..."), 0);
+ }
+
+ filter_refs(ref_array, &filter, FILTER_REFS_KIND_MASK);
+
+ if (ctx->opts.show_progress) {
+ ctx->progress_total = ref_array->nr;
+ display_progress(ctx->progress, ctx->progress_total);
+ }
+
+ ref_array_sort(sorting, ref_array);
+
+ stop_progress(&ctx->progress);
+ ref_filter_clear(&filter);
+ ref_sorting_release(sorting);
+}
+
+/*
+ * The REFS phase:
+ *
+ * Load the set of requested refs and assess them for scalability problems.
+ * Use that set to start a treewalk to all reachable objects and assess
+ * them.
+ *
+ * This data will give us insights into the repository itself (the number
+ * of refs, the size and shape of the DAG, the number and size of the
+ * objects).
+ *
+ * Theoretically, this data is independent of the on-disk representation
+ * (e.g. independent of packing concerns).
+ */
+static void survey_phase_refs(struct survey_context *ctx)
+{
+ trace2_region_enter("survey", "phase/refs", ctx->repo);
+ do_load_refs(ctx, &ctx->ref_array);
+
+ ctx->report.refs.refs_nr = ctx->ref_array.nr;
+ for (int i = 0; i < ctx->ref_array.nr; i++) {
+ unsigned long size;
+ struct ref_array_item *item = ctx->ref_array.items[i];
+
+ switch (item->kind) {
+ case FILTER_REFS_TAGS:
+ ctx->report.refs.tags_nr++;
+ if (odb_read_object_info(ctx->repo->objects,
+ &item->objectname,
+ &size) == OBJ_TAG)
+ ctx->report.refs.tags_annotated_nr++;
+ break;
+
+ case FILTER_REFS_BRANCHES:
+ ctx->report.refs.branches_nr++;
+ break;
+
+ case FILTER_REFS_REMOTES:
+ ctx->report.refs.remote_refs_nr++;
+ break;
+
+ case FILTER_REFS_OTHERS:
+ ctx->report.refs.others_nr++;
+ break;
+
+ default:
+ ctx->report.refs.unknown_nr++;
+ break;
+ }
+ }
+
+ trace2_region_leave("survey", "phase/refs", ctx->repo);
+}
+
+static void increment_object_counts(
+ struct survey_report_object_summary *summary,
+ enum object_type type,
+ size_t nr)
+{
+ switch (type) {
+ case OBJ_COMMIT:
+ summary->commits_nr += nr;
+ break;
+
+ case OBJ_TREE:
+ summary->trees_nr += nr;
+ break;
+
+ case OBJ_BLOB:
+ summary->blobs_nr += nr;
+ break;
+
+ case OBJ_TAG:
+ summary->tags_nr += nr;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void increment_totals(struct survey_context *ctx,
+ struct oid_array *oids,
+ struct survey_report_object_size_summary *summary)
+{
+ for (size_t i = 0; i < oids->nr; i++) {
+ struct object_info oi = OBJECT_INFO_INIT;
+ unsigned oi_flags = OBJECT_INFO_FOR_PREFETCH;
+ unsigned long object_length = 0;
+ off_t disk_sizep = 0;
+ enum object_type type;
+
+ oi.typep = &type;
+ oi.sizep = &object_length;
+ oi.disk_sizep = &disk_sizep;
+
+ if (odb_read_object_info_extended(ctx->repo->objects,
+ &oids->oid[i],
+ &oi, oi_flags) < 0) {
+ summary->num_missing++;
+ } else {
+ summary->nr++;
+ summary->disk_size += disk_sizep;
+ summary->inflated_size += object_length;
+ }
+ }
+}
+
+static void increment_object_totals(struct survey_context *ctx,
+ struct oid_array *oids,
+ enum object_type type,
+ const char *path)
+{
+ struct survey_report_object_size_summary *total;
+ struct survey_report_object_size_summary summary = { 0 };
+
+ increment_totals(ctx, oids, &summary);
+
+ switch (type) {
+ case OBJ_COMMIT:
+ total = &ctx->report.by_type[REPORT_TYPE_COMMIT];
+ break;
+
+ case OBJ_TREE:
+ total = &ctx->report.by_type[REPORT_TYPE_TREE];
+ break;
+
+ case OBJ_BLOB:
+ total = &ctx->report.by_type[REPORT_TYPE_BLOB];
+ break;
+
+ case OBJ_TAG:
+ total = &ctx->report.by_type[REPORT_TYPE_TAG];
+ break;
+
+ default:
+ BUG("No other type allowed");
+ }
+
+ total->nr += summary.nr;
+ total->disk_size += summary.disk_size;
+ total->inflated_size += summary.inflated_size;
+ total->num_missing += summary.num_missing;
+
+ if (type == OBJ_TREE || type == OBJ_BLOB) {
+ int index = type == OBJ_TREE ?
+ REPORT_TYPE_TREE : REPORT_TYPE_BLOB;
+ struct survey_report_top_table *top;
+
+ /*
+ * Temporarily store (const char *) here, but it will
+ * be duped if inserted and will not be freed.
+ */
+ summary.label = (char *)path;
+
+ top = ctx->report.top_paths_by_count;
+ maybe_insert_into_top_size(&top[index], &summary);
+
+ top = ctx->report.top_paths_by_disk;
+ maybe_insert_into_top_size(&top[index], &summary);
+
+ top = ctx->report.top_paths_by_inflate;
+ maybe_insert_into_top_size(&top[index], &summary);
+ }
+}
+
+static int survey_objects_path_walk_fn(const char *path,
+ struct oid_array *oids,
+ enum object_type type,
+ void *data)
+{
+ struct survey_context *ctx = data;
+
+ increment_object_counts(&ctx->report.reachable_objects,
+ type, oids->nr);
+ increment_object_totals(ctx, oids, type, path);
+
+ ctx->progress_nr += oids->nr;
+ display_progress(ctx->progress, ctx->progress_nr);
+
+ return 0;
+}
+
+static void initialize_report(struct survey_context *ctx)
+{
+ CALLOC_ARRAY(ctx->report.by_type, REPORT_TYPE_COUNT);
+ ctx->report.by_type[REPORT_TYPE_COMMIT].label = xstrdup(_("Commits"));
+ ctx->report.by_type[REPORT_TYPE_TREE].label = xstrdup(_("Trees"));
+ ctx->report.by_type[REPORT_TYPE_BLOB].label = xstrdup(_("Blobs"));
+ ctx->report.by_type[REPORT_TYPE_TAG].label = xstrdup(_("Tags"));
+
+ CALLOC_ARRAY(ctx->report.top_paths_by_count, REPORT_TYPE_COUNT);
+ init_top_sizes(&ctx->report.top_paths_by_count[REPORT_TYPE_TREE],
+ ctx->opts.top_nr, _("TOP DIRECTORIES BY COUNT"), cmp_by_nr);
+ init_top_sizes(&ctx->report.top_paths_by_count[REPORT_TYPE_BLOB],
+ ctx->opts.top_nr, _("TOP FILES BY COUNT"), cmp_by_nr);
+
+ CALLOC_ARRAY(ctx->report.top_paths_by_disk, REPORT_TYPE_COUNT);
+ init_top_sizes(&ctx->report.top_paths_by_disk[REPORT_TYPE_TREE],
+ ctx->opts.top_nr, _("TOP DIRECTORIES BY DISK SIZE"), cmp_by_disk_size);
+ init_top_sizes(&ctx->report.top_paths_by_disk[REPORT_TYPE_BLOB],
+ ctx->opts.top_nr, _("TOP FILES BY DISK SIZE"), cmp_by_disk_size);
+
+ CALLOC_ARRAY(ctx->report.top_paths_by_inflate, REPORT_TYPE_COUNT);
+ init_top_sizes(&ctx->report.top_paths_by_inflate[REPORT_TYPE_TREE],
+ ctx->opts.top_nr, _("TOP DIRECTORIES BY INFLATED SIZE"), cmp_by_inflated_size);
+ init_top_sizes(&ctx->report.top_paths_by_inflate[REPORT_TYPE_BLOB],
+ ctx->opts.top_nr, _("TOP FILES BY INFLATED SIZE"), cmp_by_inflated_size);
+}
+
+static void survey_phase_objects(struct survey_context *ctx)
+{
+ struct rev_info revs = REV_INFO_INIT;
+ struct path_walk_info info = PATH_WALK_INFO_INIT;
+ unsigned int add_flags = 0;
+
+ trace2_region_enter("survey", "phase/objects", ctx->repo);
+
+ info.revs = &revs;
+ info.path_fn = survey_objects_path_walk_fn;
+ info.path_fn_data = ctx;
+
+ initialize_report(ctx);
+
+ repo_init_revisions(ctx->repo, &revs, "");
+ revs.tag_objects = 1;
+
+ ctx->progress_nr = 0;
+ ctx->progress_total = ctx->ref_array.nr;
+ if (ctx->opts.show_progress)
+ ctx->progress = start_progress(ctx->repo,
+ _("Preparing object walk"),
+ ctx->progress_total);
+ for (int i = 0; i < ctx->ref_array.nr; i++) {
+ struct ref_array_item *item = ctx->ref_array.items[i];
+ add_pending_oid(&revs, NULL, &item->objectname, add_flags);
+ display_progress(ctx->progress, ++(ctx->progress_nr));
+ }
+ stop_progress(&ctx->progress);
+
+ ctx->progress_nr = 0;
+ ctx->progress_total = 0;
+ if (ctx->opts.show_progress)
+ ctx->progress = start_progress(ctx->repo,
+ _("Walking objects"), 0);
+ walk_objects_by_path(&info);
+ stop_progress(&ctx->progress);
+
+ release_revisions(&revs);
+ trace2_region_leave("survey", "phase/objects", ctx->repo);
+}
+
+int cmd_survey(int argc, const char **argv, const char *prefix, struct repository *repo)
+{
+ static struct survey_context ctx = {
+ .opts = {
+ .verbose = 0,
+ .show_progress = -1, /* defaults to isatty(2) */
+ .top_nr = 10,
+
+ .refs.want_all_refs = -1,
+
+ .refs.want_branches = -1, /* default these to undefined */
+ .refs.want_tags = -1,
+ .refs.want_remotes = -1,
+ .refs.want_detached = -1,
+ .refs.want_other = -1,
+ },
+ .refs = STRVEC_INIT,
+ };
+
+ static struct option survey_options[] = {
+ OPT__VERBOSE(&ctx.opts.verbose, N_("verbose output")),
+ OPT_BOOL(0, "progress", &ctx.opts.show_progress, N_("show progress")),
+ OPT_INTEGER('n', "top", &ctx.opts.top_nr,
+ N_("number of entries to include in detail tables")),
+
+ OPT_BOOL_F(0, "all-refs", &ctx.opts.refs.want_all_refs, N_("include all refs"), PARSE_OPT_NONEG),
+
+ OPT_BOOL_F(0, "branches", &ctx.opts.refs.want_branches, N_("include branches"), PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "tags", &ctx.opts.refs.want_tags, N_("include tags"), PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "remotes", &ctx.opts.refs.want_remotes, N_("include all remotes refs"), PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "detached", &ctx.opts.refs.want_detached, N_("include detached HEAD"), PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "other", &ctx.opts.refs.want_other, N_("include notes and stashes"), PARSE_OPT_NONEG),
+
+ OPT_END(),
+ };
+
+ show_usage_with_options_if_asked(argc, argv,
+ survey_usage, survey_options);
+
+ if (isatty(2))
+ color_fprintf_ln(stderr,
+ want_color_fd(2, GIT_COLOR_AUTO) ? GIT_COLOR_YELLOW : "",
+ "(THIS IS EXPERIMENTAL, EXPECT THE OUTPUT FORMAT TO CHANGE!)");
+
+ ctx.repo = repo;
+
+ prepare_repo_settings(ctx.repo);
+ survey_load_config(&ctx);
+
+ argc = parse_options(argc, argv, prefix, survey_options, survey_usage, 0);
+
+ if (ctx.opts.show_progress < 0)
+ ctx.opts.show_progress = isatty(2);
+
+ fixup_refs_wanted(&ctx);
+
+ survey_phase_refs(&ctx);
+
+ survey_phase_objects(&ctx);
+
+ survey_report_plaintext(&ctx);
+
+ clear_survey_context(&ctx);
+ return 0;
+}
diff --git a/ci/check-whitespace.sh b/ci/check-whitespace.sh
index c40804394cb079..e590ac0dfd765e 100755
--- a/ci/check-whitespace.sh
+++ b/ci/check-whitespace.sh
@@ -19,6 +19,7 @@ problems=()
commit=
commitText=
commitTextmd=
+committerEmail=
goodParent=
if ! git rev-parse --quiet --verify "${baseCommit}"
@@ -27,7 +28,7 @@ then
exit 1
fi
-while read dash sha etc
+while read dash email sha etc
do
case "${dash}" in
"---") # Line contains commit information.
@@ -40,10 +41,14 @@ do
commit="${sha}"
commitText="${sha} ${etc}"
commitTextmd="[${sha}](${url}/commit/${sha}) ${etc}"
+ committerEmail="${email}"
;;
"")
;;
*) # Line contains whitespace error information for current commit.
+ # Quod licet Iovi non licet bovi
+ test gitster@pobox.com != "$committerEmail" || break
+
if test -n "${goodParent}"
then
problems+=("1) --- ${commitTextmd}")
@@ -64,7 +69,7 @@ do
echo "${dash} ${sha} ${etc}"
;;
esac
-done <<< "$(git log --check --pretty=format:"---% h% s" "${baseCommit}"..)"
+done <<< "$(git log --check --pretty=format:"---% ce% h% s" "${baseCommit}"..)"
if test ${#problems[*]} -gt 0
then
diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh
index c55441d9df91fd..745e39997935a4 100755
--- a/ci/install-dependencies.sh
+++ b/ci/install-dependencies.sh
@@ -119,11 +119,12 @@ macos-*)
# brew install gnu-time
brew link --force gettext
- mkdir -p "$CUSTOM_PATH"
- wget -q "$P4WHENCE/bin.macosx12arm64/helix-core-server.tgz" &&
- tar -xf helix-core-server.tgz -C "$CUSTOM_PATH" p4 p4d &&
- sudo xattr -d com.apple.quarantine "$CUSTOM_PATH/p4" "$CUSTOM_PATH/p4d" 2>/dev/null || true
- rm helix-core-server.tgz
+ # Uncomment this block if you want to run `git p4` tests:
+ # mkdir -p "$CUSTOM_PATH"
+ # wget -q "$P4WHENCE/bin.macosx12arm64/helix-core-server.tgz" &&
+ # tar -xf helix-core-server.tgz -C "$CUSTOM_PATH" p4 p4d &&
+ # sudo xattr -d com.apple.quarantine "$CUSTOM_PATH/p4" "$CUSTOM_PATH/p4d" 2>/dev/null || true
+ # rm helix-core-server.tgz
case "$jobname" in
osx-meson)
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index 28cfe730ee5aed..9bdfac128dbf55 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -63,5 +63,9 @@ case "$jobname" in
;;
esac
+case " $MAKE_TARGETS " in
+*" all "*) make -C contrib/subtree test;;
+esac
+
check_unignored_build_artifacts
save_good_tree
diff --git a/ci/run-test-slice.sh b/ci/run-test-slice.sh
index ff948e397fcb70..f84190e7b73180 100755
--- a/ci/run-test-slice.sh
+++ b/ci/run-test-slice.sh
@@ -15,4 +15,7 @@ if [ "$1" == "0" ] ; then
group "Run unit tests" make --quiet -C t unit-tests-test-tool
fi
+# Run the git subtree tests only if main tests succeeded
+test 0 != "$1" || make -C contrib/subtree test
+
check_unignored_build_artifacts
diff --git a/command-list.txt b/command-list.txt
index f9005cf45979f1..d32cc32ef247ac 100644
--- a/command-list.txt
+++ b/command-list.txt
@@ -191,6 +191,7 @@ git-stash mainporcelain
git-status mainporcelain info
git-stripspace purehelpers
git-submodule mainporcelain
+git-survey mainporcelain
git-svn foreignscminterface
git-switch mainporcelain history
git-symbolic-ref plumbingmanipulators
diff --git a/common-exit.c b/common-exit.c
index 1aaa538be3ed67..609f32abed8b53 100644
--- a/common-exit.c
+++ b/common-exit.c
@@ -11,6 +11,13 @@ static void check_bug_if_BUG(void)
/* We wrap exit() to call common_exit() in git-compat-util.h */
int common_exit(const char *file, int line, int code)
{
+ /*
+ * Windows Filtering Platform driver provided by the security software
+ * may change buffer type of stdout from _IONBF to _IOFBF.
+ * It will produce no output without calling fflush() manually.
+ */
+ fflush(stdout);
+
/*
* For non-POSIX systems: Take the lowest 8 bits of the "code"
* to e.g. turn -1 into 255. On a POSIX system this is
diff --git a/compat/.gitattributes b/compat/.gitattributes
index 40dbfb170dabc5..2b5a66a3b34bda 100644
--- a/compat/.gitattributes
+++ b/compat/.gitattributes
@@ -1 +1,2 @@
/zlib-uncompress2.c whitespace=-indent-with-non-tab,-trailing-space
+/mimalloc/**/* whitespace=-trailing-space
diff --git a/compat/fsmonitor/fsm-health-win32.c b/compat/fsmonitor/fsm-health-win32.c
index 2aa8c219acee4d..4b53360d194105 100644
--- a/compat/fsmonitor/fsm-health-win32.c
+++ b/compat/fsmonitor/fsm-health-win32.c
@@ -34,7 +34,7 @@ struct fsm_health_data
struct wt_moved
{
- wchar_t wpath[MAX_PATH + 1];
+ wchar_t wpath[MAX_LONG_PATH + 1];
BY_HANDLE_FILE_INFORMATION bhfi;
} wt_moved;
};
@@ -143,8 +143,8 @@ static int has_worktree_moved(struct fsmonitor_daemon_state *state,
return 0;
case CTX_INIT:
- if (xutftowcs_path(data->wt_moved.wpath,
- state->path_worktree_watch.buf) < 0) {
+ if (xutftowcs_long_path(data->wt_moved.wpath,
+ state->path_worktree_watch.buf) < 0) {
error(_("could not convert to wide characters: '%s'"),
state->path_worktree_watch.buf);
return -1;
diff --git a/compat/fsmonitor/fsm-listen-win32.c b/compat/fsmonitor/fsm-listen-win32.c
index 9a6efc9bea340b..afcc172750af10 100644
--- a/compat/fsmonitor/fsm-listen-win32.c
+++ b/compat/fsmonitor/fsm-listen-win32.c
@@ -28,7 +28,7 @@ struct one_watch
DWORD count;
struct strbuf path;
- wchar_t wpath_longname[MAX_PATH + 1];
+ wchar_t wpath_longname[MAX_LONG_PATH + 1];
DWORD wpath_longname_len;
HANDLE hDir;
@@ -131,8 +131,8 @@ static int normalize_path_in_utf8(wchar_t *wpath, DWORD wpath_len,
*/
static void check_for_shortnames(struct one_watch *watch)
{
- wchar_t buf_in[MAX_PATH + 1];
- wchar_t buf_out[MAX_PATH + 1];
+ wchar_t buf_in[MAX_LONG_PATH + 1];
+ wchar_t buf_out[MAX_LONG_PATH + 1];
wchar_t *last;
wchar_t *p;
@@ -197,8 +197,8 @@ static enum get_relative_result get_relative_longname(
const wchar_t *wpath, DWORD wpath_len,
wchar_t *wpath_longname, size_t bufsize_wpath_longname)
{
- wchar_t buf_in[2 * MAX_PATH + 1];
- wchar_t buf_out[MAX_PATH + 1];
+ wchar_t buf_in[2 * MAX_LONG_PATH + 1];
+ wchar_t buf_out[MAX_LONG_PATH + 1];
DWORD root_len;
DWORD out_len;
@@ -298,10 +298,10 @@ static struct one_watch *create_watch(const char *path)
FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE;
HANDLE hDir;
DWORD len_longname;
- wchar_t wpath[MAX_PATH + 1];
- wchar_t wpath_longname[MAX_PATH + 1];
+ wchar_t wpath[MAX_LONG_PATH + 1];
+ wchar_t wpath_longname[MAX_LONG_PATH + 1];
- if (xutftowcs_path(wpath, path) < 0) {
+ if (xutftowcs_long_path(wpath, path) < 0) {
error(_("could not convert to wide characters: '%s'"), path);
return NULL;
}
@@ -545,7 +545,7 @@ static int process_worktree_events(struct fsmonitor_daemon_state *state)
struct string_list cookie_list = STRING_LIST_INIT_DUP;
struct fsmonitor_batch *batch = NULL;
const char *p = watch->buffer;
- wchar_t wpath_longname[MAX_PATH + 1];
+ wchar_t wpath_longname[MAX_LONG_PATH + 1];
/*
* If the kernel gets more events than will fit in the kernel
diff --git a/compat/fsmonitor/fsm-path-utils-win32.c b/compat/fsmonitor/fsm-path-utils-win32.c
index f4f9cc1f336720..c6eb065bde48b4 100644
--- a/compat/fsmonitor/fsm-path-utils-win32.c
+++ b/compat/fsmonitor/fsm-path-utils-win32.c
@@ -69,8 +69,8 @@ static int check_remote_protocol(wchar_t *wpath)
*/
int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
{
- wchar_t wpath[MAX_PATH];
- wchar_t wfullpath[MAX_PATH];
+ wchar_t wpath[MAX_LONG_PATH];
+ wchar_t wfullpath[MAX_LONG_PATH];
size_t wlen;
UINT driveType;
@@ -78,7 +78,7 @@ int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
* Do everything in wide chars because the drive letter might be
* a multi-byte sequence. See win32_has_dos_drive_prefix().
*/
- if (xutftowcs_path(wpath, path) < 0) {
+ if (xutftowcs_long_path(wpath, path) < 0) {
return -1;
}
@@ -97,7 +97,7 @@ int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
* slashes to backslashes. This is essential to get GetDriveTypeW()
* correctly handle some UNC "\\server\share\..." paths.
*/
- if (!GetFullPathNameW(wpath, MAX_PATH, wfullpath, NULL)) {
+ if (!GetFullPathNameW(wpath, MAX_LONG_PATH, wfullpath, NULL)) {
return -1;
}
diff --git a/compat/lazyload-curl.c b/compat/lazyload-curl.c
new file mode 100644
index 00000000000000..a6a3f7e3a7aeaa
--- /dev/null
+++ b/compat/lazyload-curl.c
@@ -0,0 +1,428 @@
+#include "../git-compat-util.h"
+#include "../git-curl-compat.h"
+#ifndef WIN32
+#include <dlfcn.h>
+#endif
+
+/*
+ * The ABI version of libcurl is encoded in its shared libraries' file names.
+ * This ABI version has not changed since October 2006 and is unlikely to be
+ * changed in the future. See https://curl.se/libcurl/abi.html for details.
+ */
+#define LIBCURL_ABI_VERSION "4"
+
+typedef void (*func_t)(void);
+
+#ifndef WIN32
+#ifdef __APPLE__
+#define LIBCURL_FILE_NAME(base) base "." LIBCURL_ABI_VERSION ".dylib"
+#else
+#define LIBCURL_FILE_NAME(base) base ".so." LIBCURL_ABI_VERSION
+#endif
+
+static void *load_library(const char *name)
+{
+ return dlopen(name, RTLD_LAZY);
+}
+
+static func_t load_function(void *handle, const char *name)
+{
+ /*
+ * Casting the return value of `dlsym()` to a function pointer is
+ * explicitly allowed in recent POSIX standards, but GCC complains
+ * about this in pedantic mode nevertheless. For more about this issue,
+ * see https://stackoverflow.com/q/31526876/1860823 and
+ * http://stackoverflow.com/a/36385690/1905491.
+ */
+ func_t f;
+ *(void **)&f = dlsym(handle, name);
+ return f;
+}
+#else
+#define LIBCURL_FILE_NAME(base) base "-" LIBCURL_ABI_VERSION ".dll"
+
+static void *load_library(const char *name)
+{
+ size_t name_size = strlen(name) + 1;
+ const char *path = getenv("PATH");
+ char dll_path[MAX_PATH];
+
+ while (path && *path) {
+ const char *sep = strchrnul(path, ';');
+ size_t len = sep - path;
+
+ if (len && len + name_size < sizeof(dll_path)) {
+ memcpy(dll_path, path, len);
+ dll_path[len] = '/';
+ memcpy(dll_path + len + 1, name, name_size);
+
+ if (!access(dll_path, R_OK)) {
+ wchar_t wpath[MAX_PATH];
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, dll_path, -1, wpath, ARRAY_SIZE(wpath));
+ void *res = wlen ? (void *)LoadLibraryExW(wpath, NULL, 0) : NULL;
+ if (!res) {
+ DWORD err = GetLastError();
+ char buf[1024];
+
+ if (!FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_ARGUMENT_ARRAY |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, err, LANG_NEUTRAL,
+ buf, sizeof(buf) - 1, NULL))
+ xsnprintf(buf, sizeof(buf), "last error: %ld", err);
+ error("LoadLibraryExW() failed with: %s", buf);
+ }
+ return res;
+ }
+ }
+
+ path = *sep ? sep + 1 : NULL;
+ }
+
+ return NULL;
+}
+
+static func_t load_function(void *handle, const char *name)
+{
+ return (func_t)GetProcAddress((HANDLE)handle, name);
+}
+#endif
+
+typedef struct curl_version_info_data *(*curl_version_info_type)(CURLversion version);
+static curl_version_info_type curl_version_info_func;
+
+typedef char *(*curl_easy_escape_type)(CURL *handle, const char *string, int length);
+static curl_easy_escape_type curl_easy_escape_func;
+
+typedef void (*curl_free_type)(void *p);
+static curl_free_type curl_free_func;
+
+typedef CURLcode (*curl_global_init_type)(long flags);
+static curl_global_init_type curl_global_init_func;
+
+typedef CURLsslset (*curl_global_sslset_type)(curl_sslbackend id, const char *name, const curl_ssl_backend ***avail);
+static curl_global_sslset_type curl_global_sslset_func;
+
+typedef void (*curl_global_cleanup_type)(void);
+static curl_global_cleanup_type curl_global_cleanup_func;
+
+typedef CURLcode (*curl_global_trace_type)(const char *config);
+static curl_global_trace_type curl_global_trace_func;
+
+typedef struct curl_slist *(*curl_slist_append_type)(struct curl_slist *list, const char *data);
+static curl_slist_append_type curl_slist_append_func;
+
+typedef void (*curl_slist_free_all_type)(struct curl_slist *list);
+static curl_slist_free_all_type curl_slist_free_all_func;
+
+typedef const char *(*curl_easy_strerror_type)(CURLcode error);
+static curl_easy_strerror_type curl_easy_strerror_func;
+
+typedef CURLM *(*curl_multi_init_type)(void);
+static curl_multi_init_type curl_multi_init_func;
+
+typedef CURLMcode (*curl_multi_add_handle_type)(CURLM *multi_handle, CURL *curl_handle);
+static curl_multi_add_handle_type curl_multi_add_handle_func;
+
+typedef CURLMcode (*curl_multi_remove_handle_type)(CURLM *multi_handle, CURL *curl_handle);
+static curl_multi_remove_handle_type curl_multi_remove_handle_func;
+
+typedef CURLMcode (*curl_multi_fdset_type)(CURLM *multi_handle, fd_set *read_fd_set, fd_set *write_fd_set, fd_set *exc_fd_set, int *max_fd);
+static curl_multi_fdset_type curl_multi_fdset_func;
+
+typedef CURLMcode (*curl_multi_perform_type)(CURLM *multi_handle, int *running_handles);
+static curl_multi_perform_type curl_multi_perform_func;
+
+typedef CURLMcode (*curl_multi_cleanup_type)(CURLM *multi_handle);
+static curl_multi_cleanup_type curl_multi_cleanup_func;
+
+typedef CURLMsg *(*curl_multi_info_read_type)(CURLM *multi_handle, int *msgs_in_queue);
+static curl_multi_info_read_type curl_multi_info_read_func;
+
+typedef const char *(*curl_multi_strerror_type)(CURLMcode error);
+static curl_multi_strerror_type curl_multi_strerror_func;
+
+typedef CURLMcode (*curl_multi_timeout_type)(CURLM *multi_handle, long *milliseconds);
+static curl_multi_timeout_type curl_multi_timeout_func;
+
+typedef CURL *(*curl_easy_init_type)(void);
+static curl_easy_init_type curl_easy_init_func;
+
+typedef CURLcode (*curl_easy_perform_type)(CURL *curl);
+static curl_easy_perform_type curl_easy_perform_func;
+
+typedef void (*curl_easy_cleanup_type)(CURL *curl);
+static curl_easy_cleanup_type curl_easy_cleanup_func;
+
+typedef CURL *(*curl_easy_duphandle_type)(CURL *curl);
+static curl_easy_duphandle_type curl_easy_duphandle_func;
+
+typedef CURLcode (*curl_easy_getinfo_long_type)(CURL *curl, CURLINFO info, long *value);
+static curl_easy_getinfo_long_type curl_easy_getinfo_long_func;
+
+typedef CURLcode (*curl_easy_getinfo_pointer_type)(CURL *curl, CURLINFO info, void **value);
+static curl_easy_getinfo_pointer_type curl_easy_getinfo_pointer_func;
+
+typedef CURLcode (*curl_easy_getinfo_off_t_type)(CURL *curl, CURLINFO info, curl_off_t *value);
+static curl_easy_getinfo_off_t_type curl_easy_getinfo_off_t_func;
+
+typedef CURLcode (*curl_easy_setopt_long_type)(CURL *curl, CURLoption opt, long value);
+static curl_easy_setopt_long_type curl_easy_setopt_long_func;
+
+typedef CURLcode (*curl_easy_setopt_pointer_type)(CURL *curl, CURLoption opt, void *value);
+static curl_easy_setopt_pointer_type curl_easy_setopt_pointer_func;
+
+typedef CURLcode (*curl_easy_setopt_off_t_type)(CURL *curl, CURLoption opt, curl_off_t value);
+static curl_easy_setopt_off_t_type curl_easy_setopt_off_t_func;
+
+static char ssl_backend[64];
+
+static void lazy_load_curl(void)
+{
+ static int initialized;
+ void *libcurl = NULL;
+ func_t curl_easy_getinfo_func, curl_easy_setopt_func;
+
+ if (initialized)
+ return;
+
+ initialized = 1;
+ if (ssl_backend[0]) {
+ char dll_name[64 + 16];
+ snprintf(dll_name, sizeof(dll_name) - 1,
+ LIBCURL_FILE_NAME("libcurl-%s"), ssl_backend);
+ libcurl = load_library(dll_name);
+ }
+ if (!libcurl)
+ libcurl = load_library(LIBCURL_FILE_NAME("libcurl"));
+ if (!libcurl)
+ die("failed to load library '%s'", LIBCURL_FILE_NAME("libcurl"));
+
+ curl_version_info_func = (curl_version_info_type)load_function(libcurl, "curl_version_info");
+ curl_easy_escape_func = (curl_easy_escape_type)load_function(libcurl, "curl_easy_escape");
+ curl_free_func = (curl_free_type)load_function(libcurl, "curl_free");
+ curl_global_init_func = (curl_global_init_type)load_function(libcurl, "curl_global_init");
+ curl_global_sslset_func = (curl_global_sslset_type)load_function(libcurl, "curl_global_sslset");
+ curl_global_cleanup_func = (curl_global_cleanup_type)load_function(libcurl, "curl_global_cleanup");
+ curl_global_trace_func = (curl_global_trace_type)load_function(libcurl, "curl_global_trace");
+ curl_slist_append_func = (curl_slist_append_type)load_function(libcurl, "curl_slist_append");
+ curl_slist_free_all_func = (curl_slist_free_all_type)load_function(libcurl, "curl_slist_free_all");
+ curl_easy_strerror_func = (curl_easy_strerror_type)load_function(libcurl, "curl_easy_strerror");
+ curl_multi_init_func = (curl_multi_init_type)load_function(libcurl, "curl_multi_init");
+ curl_multi_add_handle_func = (curl_multi_add_handle_type)load_function(libcurl, "curl_multi_add_handle");
+ curl_multi_remove_handle_func = (curl_multi_remove_handle_type)load_function(libcurl, "curl_multi_remove_handle");
+ curl_multi_fdset_func = (curl_multi_fdset_type)load_function(libcurl, "curl_multi_fdset");
+ curl_multi_perform_func = (curl_multi_perform_type)load_function(libcurl, "curl_multi_perform");
+ curl_multi_cleanup_func = (curl_multi_cleanup_type)load_function(libcurl, "curl_multi_cleanup");
+ curl_multi_info_read_func = (curl_multi_info_read_type)load_function(libcurl, "curl_multi_info_read");
+ curl_multi_strerror_func = (curl_multi_strerror_type)load_function(libcurl, "curl_multi_strerror");
+ curl_multi_timeout_func = (curl_multi_timeout_type)load_function(libcurl, "curl_multi_timeout");
+ curl_easy_init_func = (curl_easy_init_type)load_function(libcurl, "curl_easy_init");
+ curl_easy_perform_func = (curl_easy_perform_type)load_function(libcurl, "curl_easy_perform");
+ curl_easy_cleanup_func = (curl_easy_cleanup_type)load_function(libcurl, "curl_easy_cleanup");
+ curl_easy_duphandle_func = (curl_easy_duphandle_type)load_function(libcurl, "curl_easy_duphandle");
+
+ curl_easy_getinfo_func = load_function(libcurl, "curl_easy_getinfo");
+ curl_easy_getinfo_long_func = (curl_easy_getinfo_long_type)curl_easy_getinfo_func;
+ curl_easy_getinfo_pointer_func = (curl_easy_getinfo_pointer_type)curl_easy_getinfo_func;
+ curl_easy_getinfo_off_t_func = (curl_easy_getinfo_off_t_type)curl_easy_getinfo_func;
+
+ curl_easy_setopt_func = load_function(libcurl, "curl_easy_setopt");
+ curl_easy_setopt_long_func = (curl_easy_setopt_long_type)curl_easy_setopt_func;
+ curl_easy_setopt_pointer_func = (curl_easy_setopt_pointer_type)curl_easy_setopt_func;
+ curl_easy_setopt_off_t_func = (curl_easy_setopt_off_t_type)curl_easy_setopt_func;
+}
+
+struct curl_version_info_data *curl_version_info(CURLversion version)
+{
+ lazy_load_curl();
+ return curl_version_info_func(version);
+}
+
+char *curl_easy_escape(CURL *handle, const char *string, int length)
+{
+ lazy_load_curl();
+ return curl_easy_escape_func(handle, string, length);
+}
+
+void curl_free(void *p)
+{
+ lazy_load_curl();
+ curl_free_func(p);
+}
+
+CURLcode curl_global_init(long flags)
+{
+ lazy_load_curl();
+ return curl_global_init_func(flags);
+}
+
+CURLsslset curl_global_sslset(curl_sslbackend id, const char *name, const curl_ssl_backend ***avail)
+{
+ if (name && strlen(name) < sizeof(ssl_backend))
+ strlcpy(ssl_backend, name, sizeof(ssl_backend));
+
+ lazy_load_curl();
+ return curl_global_sslset_func(id, name, avail);
+}
+
+void curl_global_cleanup(void)
+{
+ lazy_load_curl();
+ curl_global_cleanup_func();
+}
+
+CURLcode curl_global_trace(const char *config)
+{
+ lazy_load_curl();
+ return curl_global_trace_func(config);
+}
+
+struct curl_slist *curl_slist_append(struct curl_slist *list, const char *data)
+{
+ lazy_load_curl();
+ return curl_slist_append_func(list, data);
+}
+
+void curl_slist_free_all(struct curl_slist *list)
+{
+ lazy_load_curl();
+ curl_slist_free_all_func(list);
+}
+
+const char *curl_easy_strerror(CURLcode error)
+{
+ lazy_load_curl();
+ return curl_easy_strerror_func(error);
+}
+
+CURLM *curl_multi_init(void)
+{
+ lazy_load_curl();
+ return curl_multi_init_func();
+}
+
+CURLMcode curl_multi_add_handle(CURLM *multi_handle, CURL *curl_handle)
+{
+ lazy_load_curl();
+ return curl_multi_add_handle_func(multi_handle, curl_handle);
+}
+
+CURLMcode curl_multi_remove_handle(CURLM *multi_handle, CURL *curl_handle)
+{
+ lazy_load_curl();
+ return curl_multi_remove_handle_func(multi_handle, curl_handle);
+}
+
+CURLMcode curl_multi_fdset(CURLM *multi_handle, fd_set *read_fd_set, fd_set *write_fd_set, fd_set *exc_fd_set, int *max_fd)
+{
+ lazy_load_curl();
+ return curl_multi_fdset_func(multi_handle, read_fd_set, write_fd_set, exc_fd_set, max_fd);
+}
+
+CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
+{
+ lazy_load_curl();
+ return curl_multi_perform_func(multi_handle, running_handles);
+}
+
+CURLMcode curl_multi_cleanup(CURLM *multi_handle)
+{
+ lazy_load_curl();
+ return curl_multi_cleanup_func(multi_handle);
+}
+
+CURLMsg *curl_multi_info_read(CURLM *multi_handle, int *msgs_in_queue)
+{
+ lazy_load_curl();
+ return curl_multi_info_read_func(multi_handle, msgs_in_queue);
+}
+
+const char *curl_multi_strerror(CURLMcode error)
+{
+ lazy_load_curl();
+ return curl_multi_strerror_func(error);
+}
+
+CURLMcode curl_multi_timeout(CURLM *multi_handle, long *milliseconds)
+{
+ lazy_load_curl();
+ return curl_multi_timeout_func(multi_handle, milliseconds);
+}
+
+CURL *curl_easy_init(void)
+{
+ lazy_load_curl();
+ return curl_easy_init_func();
+}
+
+CURLcode curl_easy_perform(CURL *curl)
+{
+ lazy_load_curl();
+ return curl_easy_perform_func(curl);
+}
+
+void curl_easy_cleanup(CURL *curl)
+{
+ lazy_load_curl();
+ curl_easy_cleanup_func(curl);
+}
+
+CURL *curl_easy_duphandle(CURL *curl)
+{
+ lazy_load_curl();
+ return curl_easy_duphandle_func(curl);
+}
+
+#ifndef CURL_IGNORE_DEPRECATION
+#define CURL_IGNORE_DEPRECATION(x) x
+#endif
+
+#ifndef CURLOPTTYPE_BLOB
+#define CURLOPTTYPE_BLOB 40000
+#endif
+
+#undef curl_easy_getinfo
+CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...)
+{
+ va_list ap;
+ CURLcode res;
+
+ va_start(ap, info);
+ lazy_load_curl();
+ CURL_IGNORE_DEPRECATION(
+ if (info >= CURLINFO_LONG && info < CURLINFO_DOUBLE)
+ res = curl_easy_getinfo_long_func(curl, info, va_arg(ap, long *));
+ else if ((info >= CURLINFO_STRING && info < CURLINFO_LONG) ||
+ (info >= CURLINFO_SLIST && info < CURLINFO_SOCKET))
+ res = curl_easy_getinfo_pointer_func(curl, info, va_arg(ap, void **));
+ else if (info >= CURLINFO_OFF_T)
+ res = curl_easy_getinfo_off_t_func(curl, info, va_arg(ap, curl_off_t *));
+ else
+ die("%s:%d: TODO (info: %d)!", __FILE__, __LINE__, info);
+ )
+ va_end(ap);
+ return res;
+}
+
+#undef curl_easy_setopt
+CURLcode curl_easy_setopt(CURL *curl, CURLoption opt, ...)
+{
+ va_list ap;
+ CURLcode res;
+
+ va_start(ap, opt);
+ lazy_load_curl();
+ CURL_IGNORE_DEPRECATION(
+ if (opt >= CURLOPTTYPE_LONG && opt < CURLOPTTYPE_OBJECTPOINT)
+ res = curl_easy_setopt_long_func(curl, opt, va_arg(ap, long));
+ else if (opt >= CURLOPTTYPE_OBJECTPOINT && opt < CURLOPTTYPE_OFF_T)
+ res = curl_easy_setopt_pointer_func(curl, opt, va_arg(ap, void *));
+ else if (opt >= CURLOPTTYPE_OFF_T && opt < CURLOPTTYPE_BLOB)
+ res = curl_easy_setopt_off_t_func(curl, opt, va_arg(ap, curl_off_t));
+ else
+ die("%s:%d: TODO (opt: %d)!", __FILE__, __LINE__, opt);
+ )
+ va_end(ap);
+ return res;
+}
diff --git a/compat/mimalloc/LICENSE b/compat/mimalloc/LICENSE
new file mode 100644
index 00000000000000..53315ebee557ac
--- /dev/null
+++ b/compat/mimalloc/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018-2025 Microsoft Corporation, Daan Leijen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/compat/mimalloc/alloc-aligned.c b/compat/mimalloc/alloc-aligned.c
new file mode 100644
index 00000000000000..772b76c2027944
--- /dev/null
+++ b/compat/mimalloc/alloc-aligned.c
@@ -0,0 +1,371 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h" // mi_prim_get_default_heap
+
+#include <string.h> // memset
+
+// ------------------------------------------------------
+// Aligned Allocation
+// ------------------------------------------------------
+
+static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
+ // objects up to `MI_MAX_ALIGN_GUARANTEE` are allocated aligned to their size (see `segment.c:_mi_segment_page_start`).
+ mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
+ if (alignment > size) return false;
+ if (alignment <= MI_MAX_ALIGN_SIZE) return true;
+ const size_t bsize = mi_good_size(size);
+ return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
+}
+
+#if MI_GUARDED
+static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, size_t size, size_t alignment, bool zero) mi_attr_noexcept {
+ // use over allocation for guarded blocks
+ mi_assert_internal(alignment > 0 && alignment < MI_BLOCK_ALIGNMENT_MAX);
+ const size_t oversize = size + alignment - 1;
+ void* base = _mi_heap_malloc_guarded(heap, oversize, zero);
+ void* p = mi_align_up_ptr(base, alignment);
+ mi_track_align(base, p, (uint8_t*)p - (uint8_t*)base, size);
+ mi_assert_internal(mi_usable_size(p) >= size);
+ mi_assert_internal(_mi_is_aligned(p, alignment));
+ return p;
+}
+
+static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero, size_t* usable) {
+ const size_t rate = heap->guarded_sample_rate;
+ // only write if `rate!=0` so we don't write to the constant `_mi_heap_empty`
+ if (rate != 0) { heap->guarded_sample_rate = 0; }
+ void* p = _mi_heap_malloc_zero_ex(heap, size, zero, 0, usable);
+ if (rate != 0) { heap->guarded_sample_rate = rate; }
+ return p;
+}
+#else
+static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero, size_t* usable) {
+ return _mi_heap_malloc_zero_ex(heap, size, zero, 0, usable);
+}
+#endif
+
+// Fallback aligned allocation that over-allocates -- split out for better codegen
+static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero, size_t* usable) mi_attr_noexcept
+{
+ mi_assert_internal(size <= (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE));
+ mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
+
+ void* p;
+ size_t oversize;
+ if mi_unlikely(alignment > MI_BLOCK_ALIGNMENT_MAX) {
+ // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
+ // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
+ // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
+ if mi_unlikely(offset != 0) {
+ // todo: cannot support offset alignment for very large alignments yet
+#if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
+#endif
+ return NULL;
+ }
+ oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
+ // note: no guarded as alignment > 0
+ p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment, usable); // the page block size should be large enough to align in the single huge page block
+ // zero afterwards as only the area from the aligned_p may be committed!
+ if (p == NULL) return NULL;
+ }
+ else {
+ // otherwise over-allocate
+ oversize = (size < MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : size) + alignment - 1; // adjust for size <= 16; with size 0 and aligment 64k, we would allocate a 64k block and pointing just beyond that.
+ p = mi_heap_malloc_zero_no_guarded(heap, oversize, zero, usable);
+ if (p == NULL) return NULL;
+ }
+ mi_page_t* page = _mi_ptr_page(p);
+
+ // .. and align within the allocation
+ const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)`
+ const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
+ const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset);
+ mi_assert_internal(adjust < alignment);
+ void* aligned_p = (void*)((uintptr_t)p + adjust);
+ if (aligned_p != p) {
+ mi_page_set_has_aligned(page, true);
+ #if MI_GUARDED
+ // set tag to aligned so mi_usable_size works with guard pages
+ if (adjust >= sizeof(mi_block_t)) {
+ mi_block_t* const block = (mi_block_t*)p;
+ block->next = MI_BLOCK_TAG_ALIGNED;
+ }
+ #endif
+ _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
+ }
+ // todo: expand padding if overallocated ?
+
+ mi_assert_internal(mi_page_usable_block_size(page) >= adjust + size);
+ mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
+ mi_assert_internal(mi_usable_size(aligned_p)>=size);
+ mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
+ #if MI_DEBUG > 1
+ mi_page_t* const apage = _mi_ptr_page(aligned_p);
+ void* unalign_p = _mi_page_ptr_unalign(apage, aligned_p);
+ mi_assert_internal(p == unalign_p);
+ #endif
+
+ // now zero the block if needed
+ if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
+ // for the tracker, on huge aligned allocations only the memory from the start of the large block is defined
+ mi_track_mem_undefined(aligned_p, size);
+ if (zero) {
+ _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));
+ }
+ }
+
+ if (p != aligned_p) {
+ mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
+ #if MI_GUARDED
+ mi_track_mem_defined(p, sizeof(mi_block_t));
+ #endif
+ }
+ return aligned_p;
+}
+
+// Generic primitive aligned allocation -- split out for better codegen
+static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero, size_t* usable) mi_attr_noexcept
+{
+ mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
+ // we don't allocate more than MI_MAX_ALLOC_SIZE (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+ if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) {
+ #if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
+ #endif
+ return NULL;
+ }
+
+ // use regular allocation if it is guaranteed to fit the alignment constraints.
+ // this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exist
+ // a page with the right block size, and if we always use the over-alloc fallback that would never happen.
+ if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) {
+ void* p = mi_heap_malloc_zero_no_guarded(heap, size, zero, usable);
+ mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
+ const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0;
+ if mi_likely(is_aligned_or_null) {
+ return p;
+ }
+ else {
+ // this should never happen if the `mi_malloc_is_naturally_aligned` check is correct..
+ mi_assert(false);
+ mi_free(p);
+ }
+ }
+
+ // fall back to over-allocation
+ return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero,usable);
+}
+
+
+// Primitive aligned allocation
+static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size,
+ const size_t alignment, const size_t offset, const bool zero,
+ size_t* usable) mi_attr_noexcept
+{
+ // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
+ if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
+ #if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
+ #endif
+ return NULL;
+ }
+
+ #if MI_GUARDED
+ if (offset==0 && alignment < MI_BLOCK_ALIGNMENT_MAX && mi_heap_malloc_use_guarded(heap,size)) {
+ return mi_heap_malloc_guarded_aligned(heap, size, alignment, zero);
+ }
+ #endif
+
+ // try first if there happens to be a small block available with just the right alignment
+ if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
+ const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
+ const size_t padsize = size + MI_PADDING_SIZE;
+ mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
+ if mi_likely(page->free != NULL) {
+ const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
+ if mi_likely(is_aligned)
+ {
+ if (usable!=NULL) { *usable = mi_page_usable_block_size(page); }
+ void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen
+ mi_assert_internal(p != NULL);
+ mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
+ mi_track_malloc(p,size,zero);
+ return p;
+ }
+ }
+ }
+
+ // fallback to generic aligned allocation
+ return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero, usable);
+}
+
+
+// ------------------------------------------------------
+// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
+// ------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false, NULL);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
+}
+
+// ensure a definition is emitted
+#if defined(__cplusplus)
+void* _mi_extern_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
+#endif
+
+// ------------------------------------------------------
+// Aligned Allocation
+// ------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true, NULL);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(count, size, &total)) return NULL;
+ return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_umalloc_aligned(size_t size, size_t alignment, size_t* block_size) mi_attr_noexcept {
+ return mi_heap_malloc_zero_aligned_at(mi_prim_get_default_heap(), size, alignment, 0, false, block_size);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, alignment);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_uzalloc_aligned(size_t size, size_t alignment, size_t* block_size) mi_attr_noexcept {
+ return mi_heap_malloc_zero_aligned_at(mi_prim_get_default_heap(), size, alignment, 0, true, block_size);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, alignment);
+}
+
+
+// ------------------------------------------------------
+// Aligned re-allocation
+// ------------------------------------------------------
+
+static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
+ mi_assert(alignment > 0);
+ if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero,NULL,NULL);
+ if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero,NULL);
+ size_t size = mi_usable_size(p);
+ if (newsize <= size && newsize >= (size - (size / 2))
+ && (((uintptr_t)p + offset) % alignment) == 0) {
+ return p; // reallocation still fits, is aligned and not more than 50% waste
+ }
+ else {
+ // note: we don't zero allocate upfront so we only zero initialize the expanded part
+ void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
+ if (newp != NULL) {
+ if (zero && newsize > size) {
+ // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
+ size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+ _mi_memzero((uint8_t*)newp + start, newsize - start);
+ }
+ _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
+ mi_free(p); // only free if successful
+ }
+ return newp;
+ }
+}
+
+static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
+ mi_assert(alignment > 0);
+ if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero,NULL,NULL);
+ size_t offset = ((uintptr_t)p % alignment); // use offset of previous allocation (p can be NULL)
+ return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
+}
+
+mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
+}
+
+mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+ return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
+}
+
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
+}
+
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+ return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
+}
+
+mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(newcount, size, &total)) return NULL;
+ return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(newcount, size, &total)) return NULL;
+ return mi_heap_rezalloc_aligned(heap, p, total, alignment);
+}
+
+mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+ return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
+}
+
+mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+ return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
+}
+
+mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
+}
+
+
diff --git a/compat/mimalloc/alloc.c b/compat/mimalloc/alloc.c
new file mode 100644
index 00000000000000..c0ada97c90daf8
--- /dev/null
+++ b/compat/mimalloc/alloc.c
@@ -0,0 +1,733 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE // for realpath() on Linux
+#endif
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // _mi_prim_thread_id()
+
+#include <string.h>  // memset, strlen (for mi_strdup)
+#include <stdlib.h>  // malloc, abort
+
+#define MI_IN_ALLOC_C
+#include "free.c"
+#undef MI_IN_ALLOC_C
+
+// ------------------------------------------------------
+// Allocation
+// ------------------------------------------------------
+
+// Fast allocation in a page: just pop from the free list.
+// Fall back to generic allocation only if the list is empty.
+// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
+extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero, size_t* usable) mi_attr_noexcept
+{
+ mi_assert_internal(size >= MI_PADDING_SIZE);
+ mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
+
+ // check the free list
+ mi_block_t* const block = page->free;
+ if mi_unlikely(block == NULL) {
+ return _mi_malloc_generic(heap, size, zero, 0, usable);
+ }
+ mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
+ if (usable != NULL) { *usable = mi_page_usable_block_size(page); };
+ // pop from the free list
+ page->free = mi_block_next(page, block);
+ page->used++;
+ mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
+ mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE));
+
+ #if MI_DEBUG>3
+ if (page->free_is_zero && size > sizeof(*block)) {
+ mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
+ }
+ #endif
+
+ // allow use of the block internally
+ // note: when tracking we need to avoid ever touching the MI_PADDING since
+ // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`)
+ mi_track_mem_undefined(block, mi_page_usable_block_size(page));
+
+ // zero the block? note: we need to zero the full block size (issue #63)
+ if mi_unlikely(zero) {
+ mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+ #if MI_PADDING
+ mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
+ #endif
+ if (page->free_is_zero) {
+ block->next = 0;
+ mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
+ }
+ else {
+ _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE);
+ }
+ }
+
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
+ if (!zero && !mi_page_is_huge(page)) {
+ memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
+ }
+ #elif (MI_SECURE!=0)
+ if (!zero) { block->next = 0; } // don't leak internal data
+ #endif
+
+ #if (MI_STAT>0)
+ const size_t bsize = mi_page_usable_block_size(page);
+ if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
+ mi_heap_stat_increase(heap, malloc_normal, bsize);
+ mi_heap_stat_counter_increase(heap, malloc_normal_count, 1);
+ #if (MI_STAT>1)
+ const size_t bin = _mi_bin(bsize);
+ mi_heap_stat_increase(heap, malloc_bins[bin], 1);
+ mi_heap_stat_increase(heap, malloc_requested, size - MI_PADDING_SIZE);
+ #endif
+ }
+ #endif
+
+ #if MI_PADDING // && !MI_TRACK_ENABLED
+ mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
+ ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
+ #if (MI_DEBUG>=2)
+ mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
+ #endif
+ mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
+ padding->canary = mi_ptr_encode_canary(page,block,page->keys);
+ padding->delta = (uint32_t)(delta);
+ #if MI_PADDING_CHECK
+ if (!mi_page_is_huge(page)) {
+ uint8_t* fill = (uint8_t*)padding - delta;
+ const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
+ for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
+ }
+ #endif
+ #endif
+
+ return block;
+}
+
+// extra entries for improved efficiency in `alloc-aligned.c`.
+extern void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+ return _mi_page_malloc_zero(heap,page,size,false,NULL);
+}
+extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+ return _mi_page_malloc_zero(heap,page,size,true,NULL);
+}
+
+#if MI_GUARDED
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+#endif
+
+static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero, size_t* usable) mi_attr_noexcept {
+ mi_assert(heap != NULL);
+ mi_assert(size <= MI_SMALL_SIZE_MAX);
+ #if MI_DEBUG
+ const uintptr_t tid = _mi_thread_id();
+ mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
+ #endif
+ #if (MI_PADDING || MI_GUARDED)
+ if (size == 0) { size = sizeof(void*); }
+ #endif
+ #if MI_GUARDED
+ if (mi_heap_malloc_use_guarded(heap,size)) {
+ return _mi_heap_malloc_guarded(heap, size, zero);
+ }
+ #endif
+
+ // get page in constant time, and allocate from it
+ mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
+ void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero, usable);
+ mi_track_malloc(p,size,zero);
+
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
+ return p;
+}
+
+// allocate a small block
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(heap, size, false, NULL);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small(mi_prim_get_default_heap(), size);
+}
+
+// The main allocation function
+extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment, size_t* usable) mi_attr_noexcept {
+ // fast path for small objects
+ if mi_likely(size <= MI_SMALL_SIZE_MAX) {
+ mi_assert_internal(huge_alignment == 0);
+ return mi_heap_malloc_small_zero(heap, size, zero, usable);
+ }
+ #if MI_GUARDED
+ else if (huge_alignment==0 && mi_heap_malloc_use_guarded(heap,size)) {
+ return _mi_heap_malloc_guarded(heap, size, zero);
+ }
+ #endif
+ else {
+ // regular allocation
+ mi_assert(heap!=NULL);
+ mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+ void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment, usable); // note: size can overflow but it is detected in malloc_generic
+ mi_track_malloc(p,size,zero);
+
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
+ return p;
+ }
+}
+
+extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+ return _mi_heap_malloc_zero_ex(heap, size, zero, 0, NULL);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return _mi_heap_malloc_zero(heap, size, false);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc(mi_prim_get_default_heap(), size);
+}
+
+// zero initialized small block
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true, NULL);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return _mi_heap_malloc_zero(heap, size, true);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
+ return mi_heap_zalloc(mi_prim_get_default_heap(),size);
+}
+
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(count,size,&total)) return NULL;
+ return mi_heap_zalloc(heap,total);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_calloc(mi_prim_get_default_heap(),count,size);
+}
+
+// Return usable size
+mi_decl_nodiscard mi_decl_restrict void* mi_umalloc_small(size_t size, size_t* usable) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, false, usable);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_umalloc(mi_heap_t* heap, size_t size, size_t* usable) mi_attr_noexcept {
+ return _mi_heap_malloc_zero_ex(heap, size, false, 0, usable);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_umalloc(size_t size, size_t* usable) mi_attr_noexcept {
+ return mi_heap_umalloc(mi_prim_get_default_heap(), size, usable);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_uzalloc(size_t size, size_t* usable) mi_attr_noexcept {
+ return _mi_heap_malloc_zero_ex(mi_prim_get_default_heap(), size, true, 0, usable);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_ucalloc(size_t count, size_t size, size_t* usable) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(count,size,&total)) return NULL;
+ return mi_uzalloc(total, usable);
+}
+
+// Uninitialized `calloc`
+mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(count, size, &total)) return NULL;
+ return mi_heap_malloc(heap, total);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_mallocn(mi_prim_get_default_heap(),count,size);
+}
+
+// Expand (or shrink) in place (or fail)
+void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
+ #if MI_PADDING
+ // we do not shrink/expand with padding enabled
+ MI_UNUSED(p); MI_UNUSED(newsize);
+ return NULL;
+ #else
+ if (p == NULL) return NULL;
+ const mi_page_t* const page = mi_validate_ptr_page(p,"mi_expand");
+ const size_t size = _mi_usable_size(p,page);
+ if (newsize > size) return NULL;
+ return p; // it fits
+ #endif
+}
+
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero, size_t* usable_pre, size_t* usable_post) mi_attr_noexcept {
+ // if p == NULL then behave as malloc.
+ // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
+ // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
+ const mi_page_t* page;
+ size_t size;
+ if (p==NULL) {
+ page = NULL;
+ size = 0;
+ if (usable_pre!=NULL) { *usable_pre = 0; }
+ }
+ else {
+ page = mi_validate_ptr_page(p,"mi_realloc");
+ size = _mi_usable_size(p,page);
+ if (usable_pre!=NULL) { *usable_pre = mi_page_usable_block_size(page); }
+ }
+ if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
+ mi_assert_internal(p!=NULL);
+ // todo: do not track as the usable size is still the same in the free; adjust potential padding?
+ // mi_track_resize(p,size,newsize)
+ // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); }
+ if (usable_post!=NULL) { *usable_post = mi_page_usable_block_size(page); }
+ return p; // reallocation still fits and not more than 50% waste
+ }
+ void* newp = mi_heap_umalloc(heap,newsize,usable_post);
+ if mi_likely(newp != NULL) {
+ if (zero && newsize > size) {
+ // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
+ const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+ _mi_memzero((uint8_t*)newp + start, newsize - start);
+ }
+ else if (newsize == 0) {
+ ((uint8_t*)newp)[0] = 0; // work around for applications that expect zero-reallocation to be zero initialized (issue #725)
+ }
+ if mi_likely(p != NULL) {
+ const size_t copysize = (newsize > size ? size : newsize);
+ mi_track_mem_defined(p,copysize); // _mi_usable_size may be too large for byte precise memory tracking..
+ _mi_memcpy(newp, p, copysize);
+ mi_free(p); // only free the original pointer if successful
+ }
+ }
+ return newp;
+}
+
+mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+ return _mi_heap_realloc_zero(heap, p, newsize, false, NULL, NULL);
+}
+
+mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(count, size, &total)) return NULL;
+ return mi_heap_realloc(heap, p, total);
+}
+
+
+// Reallocate but free `p` on errors
+mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+ void* newp = mi_heap_realloc(heap, p, newsize);
+ if (newp==NULL && p!=NULL) mi_free(p);
+ return newp;
+}
+
+mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+ return _mi_heap_realloc_zero(heap, p, newsize, true, NULL, NULL);
+}
+
+mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+ size_t total;
+ if (mi_count_size_overflow(count, size, &total)) return NULL;
+ return mi_heap_rezalloc(heap, p, total);
+}
+
+
+mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
+ return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize);
+}
+
+mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size);
+}
+
+mi_decl_nodiscard void* mi_urealloc(void* p, size_t newsize, size_t* usable_pre, size_t* usable_post) mi_attr_noexcept {
+ return _mi_heap_realloc_zero(mi_prim_get_default_heap(),p,newsize, false, usable_pre, usable_post);
+}
+
+// Reallocate but free `p` on errors
+mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
+ return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize);
+}
+
+mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
+ return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize);
+}
+
+mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size);
+}
+
+
+
+// ------------------------------------------------------
+// strdup, strndup, and realpath
+// ------------------------------------------------------
+
+// `strdup` using mi_malloc
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
+ if (s == NULL) return NULL;
+ size_t len = _mi_strlen(s);
+ char* t = (char*)mi_heap_malloc(heap,len+1);
+ if (t == NULL) return NULL;
+ _mi_memcpy(t, s, len);
+ t[len] = 0;
+ return t;
+}
+
+mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
+ return mi_heap_strdup(mi_prim_get_default_heap(), s);
+}
+
+// `strndup` using mi_malloc
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
+ if (s == NULL) return NULL;
+ const size_t len = _mi_strnlen(s,n); // len <= n
+ char* t = (char*)mi_heap_malloc(heap, len+1);
+ if (t == NULL) return NULL;
+ _mi_memcpy(t, s, len);
+ t[len] = 0;
+ return t;
+}
+
+mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
+ return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
+}
+
+#ifndef __wasi__
+// `realpath` using mi_malloc
+#ifdef _WIN32
+#ifndef PATH_MAX
+#define PATH_MAX MAX_PATH
+#endif
+
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
+ // todo: use GetFullPathNameW to allow longer file names
+ char buf[PATH_MAX];
+ DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
+ if (res == 0) {
+ errno = GetLastError(); return NULL;
+ }
+ else if (res > PATH_MAX) {
+ errno = EINVAL; return NULL;
+ }
+ else if (resolved_name != NULL) {
+ return resolved_name;
+ }
+ else {
+ return mi_heap_strndup(heap, buf, PATH_MAX);
+ }
+}
+#else
+/*
+#include <unistd.h> // pathconf
+static size_t mi_path_max(void) {
+ static size_t path_max = 0;
+ if (path_max <= 0) {
+ long m = pathconf("/",_PC_PATH_MAX);
+ if (m <= 0) path_max = 4096; // guess
+ else if (m < 256) path_max = 256; // at least 256
+ else path_max = m;
+ }
+ return path_max;
+}
+*/
+char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
+ if (resolved_name != NULL) {
+ return realpath(fname,resolved_name);
+ }
+ else {
+ char* rname = realpath(fname, NULL);
+ if (rname == NULL) return NULL;
+ char* result = mi_heap_strdup(heap, rname);
+ mi_cfree(rname); // use checked free (which may be redirected to our free but that's ok)
+ // note: with ASAN realpath is intercepted and mi_cfree may leak the returned pointer :-(
+ return result;
+ }
+ /*
+ const size_t n = mi_path_max();
+ char* buf = (char*)mi_malloc(n+1);
+ if (buf == NULL) {
+ errno = ENOMEM;
+ return NULL;
+ }
+ char* rname = realpath(fname,buf);
+ char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
+ mi_free(buf);
+ return result;
+ }
+ */
+}
+#endif
+
+mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
+ return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name);
+}
+#endif
+
+/*-------------------------------------------------------
+C++ new and new_aligned
+The standard requires calling into `get_new_handler` and
+throwing the bad_alloc exception on failure. If we compile
+with a C++ compiler we can implement this precisely. If we
+use a C compiler we cannot throw a `bad_alloc` exception
+but we call `exit` instead (i.e. not returning).
+-------------------------------------------------------*/
+
+#ifdef __cplusplus
+#include <new>
+static bool mi_try_new_handler(bool nothrow) {
+ #if defined(_MSC_VER) || (__cplusplus >= 201103L)
+ std::new_handler h = std::get_new_handler();
+ #else
+ std::new_handler h = std::set_new_handler();
+ std::set_new_handler(h);
+ #endif
+ if (h==NULL) {
+ _mi_error_message(ENOMEM, "out of memory in 'new'");
+ #if defined(_CPPUNWIND) || defined(__cpp_exceptions) // exceptions are not always enabled
+ if (!nothrow) {
+ throw std::bad_alloc();
+ }
+ #else
+ MI_UNUSED(nothrow);
+ #endif
+ return false;
+ }
+ else {
+ h();
+ return true;
+ }
+}
+#else
+typedef void (*std_new_handler_t)(void);
+
+#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER))) // exclude clang-cl, see issue #631
+std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) {
+ return NULL;
+}
+static std_new_handler_t mi_get_new_handler(void) {
+ return _ZSt15get_new_handlerv();
+}
+#else
+// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
+static std_new_handler_t mi_get_new_handler(void) {
+ return NULL;
+}
+#endif
+
+static bool mi_try_new_handler(bool nothrow) {
+ std_new_handler_t h = mi_get_new_handler();
+ if (h==NULL) {
+ _mi_error_message(ENOMEM, "out of memory in 'new'");
+ if (!nothrow) {
+ abort(); // cannot throw in plain C, use abort
+ }
+ return false;
+ }
+ else {
+ h();
+ return true;
+ }
+}
+#endif
+
+mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) {
+ void* p = NULL;
+ while(p == NULL && mi_try_new_handler(nothrow)) {
+ p = mi_heap_malloc(heap,size);
+ }
+ return p;
+}
+
+static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
+ return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow);
+}
+
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
+ void* p = mi_heap_malloc(heap,size);
+ if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false);
+ return p;
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
+ return mi_heap_alloc_new(mi_prim_get_default_heap(), size);
+}
+
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
+ size_t total;
+ if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
+ mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
+ return NULL;
+ }
+ else {
+ return mi_heap_alloc_new(heap,total);
+ }
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
+ return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size);
+}
+
+
+mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
+ void* p = mi_malloc(size);
+ if mi_unlikely(p == NULL) return mi_try_new(size, true);
+ return p;
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
+ void* p;
+ do {
+ p = mi_malloc_aligned(size, alignment);
+ }
+ while(p == NULL && mi_try_new_handler(false));
+ return p;
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
+ void* p;
+ do {
+ p = mi_malloc_aligned(size, alignment);
+ }
+ while(p == NULL && mi_try_new_handler(true));
+ return p;
+}
+
+mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
+ void* q;
+ do {
+ q = mi_realloc(p, newsize);
+ } while (q == NULL && mi_try_new_handler(false));
+ return q;
+}
+
+mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
+ size_t total;
+ if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
+ mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
+ return NULL;
+ }
+ else {
+ return mi_new_realloc(p, total);
+ }
+}
+
+#if MI_GUARDED
+// We always allocate a guarded allocation at an offset (`mi_page_has_aligned` will be true).
+// We then set the first word of the block to `0` for regular offset aligned allocations (in `alloc-aligned.c`)
+// and the first word to `~0` for guarded allocations to have a correct `mi_usable_size`
+
+static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) {
+ // TODO: we can still make padding work by moving it out of the guard page area
+ mi_page_t* const page = _mi_ptr_page(block);
+ mi_page_set_has_aligned(page, true);
+ block->next = MI_BLOCK_TAG_GUARDED;
+
+ // set guard page at the end of the block
+ mi_segment_t* const segment = _mi_page_segment(page);
+ const size_t block_size = mi_page_block_size(page); // must use `block_size` to match `mi_free_local`
+ const size_t os_page_size = _mi_os_page_size();
+ mi_assert_internal(block_size >= obj_size + os_page_size + sizeof(mi_block_t));
+ if (block_size < obj_size + os_page_size + sizeof(mi_block_t)) {
+ // should never happen
+ mi_free(block);
+ return NULL;
+ }
+ uint8_t* guard_page = (uint8_t*)block + block_size - os_page_size;
+ mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));
+ if mi_likely(segment->allow_decommit && _mi_is_aligned(guard_page, os_page_size)) {
+ const bool ok = _mi_os_protect(guard_page, os_page_size);
+ if mi_unlikely(!ok) {
+ _mi_warning_message("failed to set a guard page behind an object (object %p of size %zu)\n", block, block_size);
+ }
+ }
+ else {
+ _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) (object %p of size %zu)\n", block, block_size);
+ }
+
+ // align pointer just in front of the guard page
+ size_t offset = block_size - os_page_size - obj_size;
+ mi_assert_internal(offset > sizeof(mi_block_t));
+ if (offset > MI_BLOCK_ALIGNMENT_MAX) {
+ // give up to place it right in front of the guard page if the offset is too large for unalignment
+ offset = MI_BLOCK_ALIGNMENT_MAX;
+ }
+ void* p = (uint8_t*)block + offset;
+ mi_track_align(block, p, offset, obj_size);
+ mi_track_mem_defined(block, sizeof(mi_block_t));
+ return p;
+}
+
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
+{
+ #if defined(MI_PADDING_SIZE)
+ mi_assert(MI_PADDING_SIZE==0);
+ #endif
+ // allocate multiple of page size ending in a guard page
+ // ensure minimal alignment requirement?
+ const size_t os_page_size = _mi_os_page_size();
+ const size_t obj_size = (mi_option_is_enabled(mi_option_guarded_precise) ? size : _mi_align_up(size, MI_MAX_ALIGN_SIZE));
+ const size_t bsize = _mi_align_up(_mi_align_up(obj_size, MI_MAX_ALIGN_SIZE) + sizeof(mi_block_t), MI_MAX_ALIGN_SIZE);
+ const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size);
+ mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */, NULL);
+ if (block==NULL) return NULL;
+ void* const p = mi_block_ptr_set_guarded(block, obj_size);
+
+ // stats
+ mi_track_malloc(p, size, zero);
+ if (p != NULL) {
+ if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
+ #if MI_STAT>1
+ mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size);
+ mi_heap_stat_increase(heap, malloc_requested, size);
+ #endif
+ _mi_stat_counter_increase(&heap->tld->stats.malloc_guarded_count, 1);
+ }
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
+ return p;
+}
+#endif
+
+// ------------------------------------------------------
+// ensure explicit external inline definitions are emitted!
+// ------------------------------------------------------
+
+#ifdef __cplusplus
+void* _mi_externs[] = {
+ (void*)&_mi_page_malloc,
+ (void*)&_mi_page_malloc_zero,
+ (void*)&_mi_heap_malloc_zero,
+ (void*)&_mi_heap_malloc_zero_ex,
+ (void*)&mi_malloc,
+ (void*)&mi_malloc_small,
+ (void*)&mi_zalloc_small,
+ (void*)&mi_heap_malloc,
+ (void*)&mi_heap_zalloc,
+ (void*)&mi_heap_malloc_small,
+ // (void*)&mi_heap_alloc_new,
+ // (void*)&mi_heap_alloc_new_n
+};
+#endif
diff --git a/compat/mimalloc/arena-abandon.c b/compat/mimalloc/arena-abandon.c
new file mode 100644
index 00000000000000..460c80fc22782f
--- /dev/null
+++ b/compat/mimalloc/arena-abandon.c
@@ -0,0 +1,346 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+#if !defined(MI_IN_ARENA_C)
+#error "this file should be included from 'arena.c' (so mi_arena_t is visible)"
+// add includes help an IDE
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "bitmap.h"
+#endif
+
+// Minimal exports for arena-abandoned.
+size_t mi_arena_id_index(mi_arena_id_t id);
+mi_arena_t* mi_arena_from_index(size_t idx);
+size_t mi_arena_get_count(void);
+void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex);
+bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index);
+
+/* -----------------------------------------------------------
+ Abandoned blocks/segments:
+
+ _mi_arena_segment_clear_abandoned
+ _mi_arena_segment_mark_abandoned
+
+ This is used to atomically abandon/reclaim segments
+ (and crosses the arena API but it is convenient to have here).
+
+ Abandoned segments still have live blocks; they get reclaimed
+ when a thread frees a block in it, or when a thread needs a fresh
+ segment.
+
+ Abandoned segments are atomically marked in the `block_abandoned`
+ bitmap of arenas. Any segments allocated outside arenas are put
+ in the sub-process `abandoned_os_list`. This list is accessed
+ using locks but this should be uncommon and generally uncontended.
+ Reclaim and visiting either scan through the `block_abandoned`
+ bitmaps of the arena's, or visit the `abandoned_os_list`
+
+ A potentially nicer design is to use arena's for everything
+ and perhaps have virtual arena's to map OS allocated memory
+ but this would lack the "density" of our current arena's. TBC.
+----------------------------------------------------------- */
+
+
+// reclaim a specific OS abandoned segment; `true` on success.
+// sets the thread_id.
+static bool mi_arena_segment_os_clear_abandoned(mi_segment_t* segment, bool take_lock) {
+ mi_assert(segment->memid.memkind != MI_MEM_ARENA);
+ // not in an arena, remove from list of abandoned os segments
+ mi_subproc_t* const subproc = segment->subproc;
+ if (take_lock && !mi_lock_try_acquire(&subproc->abandoned_os_lock)) {
+ return false; // failed to acquire the lock, we just give up
+ }
+ // remove atomically from the abandoned os list (if possible!)
+ bool reclaimed = false;
+ mi_segment_t* const next = segment->abandoned_os_next;
+ mi_segment_t* const prev = segment->abandoned_os_prev;
+ if (next != NULL || prev != NULL || subproc->abandoned_os_list == segment) {
+ #if MI_DEBUG>3
+ // find ourselves in the abandoned list (and check the count)
+ bool found = false;
+ size_t count = 0;
+ for (mi_segment_t* current = subproc->abandoned_os_list; current != NULL; current = current->abandoned_os_next) {
+ if (current == segment) { found = true; }
+ count++;
+ }
+ mi_assert_internal(found);
+ mi_assert_internal(count == mi_atomic_load_relaxed(&subproc->abandoned_os_list_count));
+ #endif
+ // remove (atomically) from the list and reclaim
+ if (prev != NULL) { prev->abandoned_os_next = next; }
+ else { subproc->abandoned_os_list = next; }
+ if (next != NULL) { next->abandoned_os_prev = prev; }
+ else { subproc->abandoned_os_list_tail = prev; }
+ segment->abandoned_os_next = NULL;
+ segment->abandoned_os_prev = NULL;
+ mi_atomic_decrement_relaxed(&subproc->abandoned_count);
+ mi_atomic_decrement_relaxed(&subproc->abandoned_os_list_count);
+ if (take_lock) { // don't reset the thread_id when iterating
+ mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
+ }
+ reclaimed = true;
+ }
+ if (take_lock) { mi_lock_release(&segment->subproc->abandoned_os_lock); }
+ return reclaimed;
+}
+
+// reclaim a specific abandoned segment; `true` on success.
+// sets the thread_id.
+bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment) {
+ if mi_unlikely(segment->memid.memkind != MI_MEM_ARENA) {
+ return mi_arena_segment_os_clear_abandoned(segment, true /* take lock */);
+ }
+ // arena segment: use the blocks_abandoned bitmap.
+ size_t arena_idx;
+ size_t bitmap_idx;
+ mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx);
+ mi_arena_t* arena = mi_arena_from_index(arena_idx);
+ mi_assert_internal(arena != NULL);
+ // reclaim atomically
+ bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx);
+ if (was_marked) {
+ mi_assert_internal(mi_atomic_load_acquire(&segment->thread_id) == 0);
+ mi_atomic_decrement_relaxed(&segment->subproc->abandoned_count);
+ mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
+ }
+ // mi_assert_internal(was_marked);
+ mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
+ //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
+ return was_marked;
+}
+
+
+// mark a specific OS segment as abandoned
+static void mi_arena_segment_os_mark_abandoned(mi_segment_t* segment) {
+ mi_assert(segment->memid.memkind != MI_MEM_ARENA);
+ // not in an arena; we use a list of abandoned segments
+ mi_subproc_t* const subproc = segment->subproc;
+ mi_lock(&subproc->abandoned_os_lock) {
+ // push on the tail of the list (important for the visitor)
+ mi_segment_t* prev = subproc->abandoned_os_list_tail;
+ mi_assert_internal(prev == NULL || prev->abandoned_os_next == NULL);
+ mi_assert_internal(segment->abandoned_os_prev == NULL);
+ mi_assert_internal(segment->abandoned_os_next == NULL);
+ if (prev != NULL) { prev->abandoned_os_next = segment; }
+ else { subproc->abandoned_os_list = segment; }
+ subproc->abandoned_os_list_tail = segment;
+ segment->abandoned_os_prev = prev;
+ segment->abandoned_os_next = NULL;
+ mi_atomic_increment_relaxed(&subproc->abandoned_os_list_count);
+ mi_atomic_increment_relaxed(&subproc->abandoned_count);
+ // and release the lock
+ }
+ return;
+}
+
+// mark a specific segment as abandoned
+// clears the thread_id.
+void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
+{
+ mi_assert_internal(segment->used == segment->abandoned);
+ mi_atomic_store_release(&segment->thread_id, (uintptr_t)0); // mark as abandoned for multi-thread free's
+ if mi_unlikely(segment->memid.memkind != MI_MEM_ARENA) {
+ mi_arena_segment_os_mark_abandoned(segment);
+ return;
+ }
+ // segment is in an arena, mark it in the arena `blocks_abandoned` bitmap
+ size_t arena_idx;
+ size_t bitmap_idx;
+ mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx);
+ mi_arena_t* arena = mi_arena_from_index(arena_idx);
+ mi_assert_internal(arena != NULL);
+ // set abandonment atomically
+ mi_subproc_t* const subproc = segment->subproc; // don't access the segment after setting it abandoned
+ const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
+ if (was_unmarked) { mi_atomic_increment_relaxed(&subproc->abandoned_count); }
+ mi_assert_internal(was_unmarked);
+ mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
+}
+
+
+/* -----------------------------------------------------------
+ Iterate through the abandoned blocks/segments using a cursor.
+ This is used for reclaiming and abandoned block visiting.
+----------------------------------------------------------- */
+
+// start a cursor at a randomized arena
+void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, bool visit_all, mi_arena_field_cursor_t* current) {
+ mi_assert_internal(heap == NULL || heap->tld->segments.subproc == subproc);
+ current->bitmap_idx = 0;
+ current->subproc = subproc;
+ current->visit_all = visit_all;
+ current->hold_visit_lock = false;
+ const size_t abandoned_count = mi_atomic_load_relaxed(&subproc->abandoned_count);
+ const size_t abandoned_list_count = mi_atomic_load_relaxed(&subproc->abandoned_os_list_count);
+ const size_t max_arena = mi_arena_get_count();
+ if (heap != NULL && heap->arena_id != _mi_arena_id_none()) {
+ // for a heap that is bound to one arena, only visit that arena
+ current->start = mi_arena_id_index(heap->arena_id);
+ current->end = current->start + 1;
+ current->os_list_count = 0;
+ }
+ else {
+ // otherwise visit all starting at a random location
+ if (abandoned_count > abandoned_list_count && max_arena > 0) {
+ current->start = (heap == NULL || max_arena == 0 ? 0 : (mi_arena_id_t)(_mi_heap_random_next(heap) % max_arena));
+ current->end = current->start + max_arena;
+ }
+ else {
+ current->start = 0;
+ current->end = 0;
+ }
+ current->os_list_count = abandoned_list_count; // max entries to visit in the os abandoned list
+ }
+ mi_assert_internal(current->start <= max_arena);
+}
+
+void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current) {
+ if (current->hold_visit_lock) {
+ mi_lock_release(&current->subproc->abandoned_os_visit_lock);
+ current->hold_visit_lock = false;
+ }
+}
+
+static mi_segment_t* mi_arena_segment_clear_abandoned_at(mi_arena_t* arena, mi_subproc_t* subproc, mi_bitmap_index_t bitmap_idx) {
+ // try to reclaim an abandoned segment in the arena atomically
+ if (!_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) return NULL;
+ mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
+ mi_segment_t* segment = (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx);
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0);
+ // check that the segment belongs to our sub-process
+ // note: this is the reason we need the `abandoned_visit` lock in the case abandoned visiting is enabled.
+ // without the lock an abandoned visit may otherwise fail to visit all abandoned segments in the sub-process.
+ // for regular reclaim it is fine to miss one sometimes so without abandoned visiting we don't need the `abandoned_visit` lock.
+ if (segment->subproc != subproc) {
+ // it is from another sub-process, re-mark it and continue searching
+ const bool was_zero = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
+ mi_assert_internal(was_zero); MI_UNUSED(was_zero);
+ return NULL;
+ }
+ else {
+ // success, we unabandoned a segment in our sub-process
+ mi_atomic_decrement_relaxed(&subproc->abandoned_count);
+ return segment;
+ }
+}
+
+static mi_segment_t* mi_arena_segment_clear_abandoned_next_field(mi_arena_field_cursor_t* previous) {
+ const size_t max_arena = mi_arena_get_count();
+ size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx);
+ size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx);
+ // visit arena's (from the previous cursor)
+ for (; previous->start < previous->end; previous->start++, field_idx = 0, bit_idx = 0) {
+ // index wraps around
+ size_t arena_idx = (previous->start >= max_arena ? previous->start % max_arena : previous->start);
+ mi_arena_t* arena = mi_arena_from_index(arena_idx);
+ if (arena != NULL) {
+ bool has_lock = false;
+ // visit the abandoned fields (starting at previous_idx)
+ for (; field_idx < arena->field_count; field_idx++, bit_idx = 0) {
+ size_t field = mi_atomic_load_relaxed(&arena->blocks_abandoned[field_idx]);
+ if mi_unlikely(field != 0) { // skip zero fields quickly
+ // we only take the arena lock if there are actually abandoned segments present
+ if (!has_lock && mi_option_is_enabled(mi_option_visit_abandoned)) {
+ has_lock = (previous->visit_all ? (mi_lock_acquire(&arena->abandoned_visit_lock),true) : mi_lock_try_acquire(&arena->abandoned_visit_lock));
+ if (!has_lock) {
+ if (previous->visit_all) {
+ _mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the visitor lock");
+ }
+ // skip to next arena
+ break;
+ }
+ }
+ mi_assert_internal(has_lock || !mi_option_is_enabled(mi_option_visit_abandoned));
+ // visit each set bit in the field (todo: maybe use `ctz` here?)
+ for (; bit_idx < MI_BITMAP_FIELD_BITS; bit_idx++) {
+ // pre-check if the bit is set
+ size_t mask = ((size_t)1 << bit_idx);
+ if mi_unlikely((field & mask) == mask) {
+ mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
+ mi_segment_t* const segment = mi_arena_segment_clear_abandoned_at(arena, previous->subproc, bitmap_idx);
+ if (segment != NULL) {
+ //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
+ if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); }
+ previous->bitmap_idx = mi_bitmap_index_create_ex(field_idx, bit_idx + 1); // start at next one for the next iteration
+ return segment;
+ }
+ }
+ }
+ }
+ }
+ if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); }
+ }
+ }
+ return NULL;
+}
+
+static mi_segment_t* mi_arena_segment_clear_abandoned_next_list(mi_arena_field_cursor_t* previous) {
+ // go through the abandoned_os_list
+ // we only allow one thread per sub-process to do to visit guarded by the `abandoned_os_visit_lock`.
+ // The lock is released when the cursor is released.
+ if (!previous->hold_visit_lock) {
+ previous->hold_visit_lock = (previous->visit_all ? (mi_lock_acquire(&previous->subproc->abandoned_os_visit_lock),true)
+ : mi_lock_try_acquire(&previous->subproc->abandoned_os_visit_lock));
+ if (!previous->hold_visit_lock) {
+ if (previous->visit_all) {
+ _mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the OS visitor lock");
+ }
+ return NULL; // we cannot get the lock, give up
+ }
+ }
+ // One list entry at a time
+ while (previous->os_list_count > 0) {
+ previous->os_list_count--;
+ mi_lock_acquire(&previous->subproc->abandoned_os_lock); // this could contend with concurrent OS block abandonment and reclaim from `free`
+ mi_segment_t* segment = previous->subproc->abandoned_os_list;
+ // pop from head of the list, a subsequent mark will push at the end (and thus we iterate through os_list_count entries)
+ if (segment == NULL || mi_arena_segment_os_clear_abandoned(segment, false /* we already have the lock */)) {
+ mi_lock_release(&previous->subproc->abandoned_os_lock);
+ return segment;
+ }
+ // already abandoned, try again
+ mi_lock_release(&previous->subproc->abandoned_os_lock);
+ }
+ // done
+ mi_assert_internal(previous->os_list_count == 0);
+ return NULL;
+}
+
+
+// reclaim abandoned segments
+// this does not set the thread id (so it appears as still abandoned)
+mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous) {
+ if (previous->start < previous->end) {
+ // walk the arena
+ mi_segment_t* segment = mi_arena_segment_clear_abandoned_next_field(previous);
+ if (segment != NULL) { return segment; }
+ }
+ // no entries in the arena's anymore, walk the abandoned OS list
+ mi_assert_internal(previous->start == previous->end);
+ return mi_arena_segment_clear_abandoned_next_list(previous);
+}
+
+
+bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
+ // (unfortunately) the visit_abandoned option must be enabled from the start.
+ // This is to avoid taking locks if abandoned list visiting is not required (as for most programs)
+ if (!mi_option_is_enabled(mi_option_visit_abandoned)) {
+ _mi_error_message(EFAULT, "internal error: can only visit abandoned blocks when MIMALLOC_VISIT_ABANDONED=ON");
+ return false;
+ }
+ mi_arena_field_cursor_t current;
+ _mi_arena_field_cursor_init(NULL, _mi_subproc_from_id(subproc_id), true /* visit all (blocking) */, &current);
+ mi_segment_t* segment;
+ bool ok = true;
+ while (ok && (segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
+ ok = _mi_segment_visit_blocks(segment, heap_tag, visit_blocks, visitor, arg);
+ _mi_arena_segment_mark_abandoned(segment);
+ }
+ _mi_arena_field_cursor_done(&current);
+ return ok;
+}
diff --git a/compat/mimalloc/arena.c b/compat/mimalloc/arena.c
new file mode 100644
index 00000000000000..c87dd23b54107c
--- /dev/null
+++ b/compat/mimalloc/arena.c
@@ -0,0 +1,1045 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* ----------------------------------------------------------------------------
+"Arenas" are fixed area's of OS memory from which we can allocate
+large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
+In contrast to the rest of mimalloc, the arenas are shared between
+threads and need to be accessed using atomic operations.
+
+Arenas are also used to for huge OS page (1GiB) reservations or for reserving
+OS memory upfront which can be improve performance or is sometimes needed
+on embedded devices. We can also employ this with WASI or `sbrk` systems
+to reserve large arenas upfront and be able to reuse the memory more effectively.
+
+The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
+-----------------------------------------------------------------------------*/
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "bitmap.h"
+
+
+/* -----------------------------------------------------------
+ Arena allocation
+----------------------------------------------------------- */
+
+// A memory arena descriptor
+typedef struct mi_arena_s {
+ mi_arena_id_t id; // arena id; 0 for non-specific
+ mi_memid_t memid; // memid of the memory area
+ _Atomic(uint8_t*) start; // the start of the memory area
+ size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
+ size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
+ size_t meta_size; // size of the arena structure itself (including its bitmaps)
+ mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation)
+ int numa_node; // associated NUMA node
+ bool exclusive; // only allow allocations if specifically for this arena
+ bool is_large; // memory area consists of large- or huge OS pages (always committed)
+ mi_lock_t abandoned_visit_lock; // lock is only used when abandoned segments are being visited
+ _Atomic(size_t) search_idx; // optimization to start the search for free blocks
+ _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be purged from `blocks_purge`.
+
+ mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
+ mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
+ mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
+ mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here)
+ mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
+ // do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields.
+} mi_arena_t;
+
+
+#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN)
+#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB
+#define MI_MAX_ARENAS (132) // Limited as the reservation exponentially increases (and takes up .bss)
+
+// The available arenas
+static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
+static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0
+static mi_decl_cache_align _Atomic(int64_t) mi_arenas_purge_expire; // set if there exist purgeable arenas
+
+#define MI_IN_ARENA_C
+#include "arena-abandon.c"
+#undef MI_IN_ARENA_C
+
+/* -----------------------------------------------------------
+ Arena id's
+ id = arena_index + 1
+----------------------------------------------------------- */
+
+size_t mi_arena_id_index(mi_arena_id_t id) {
+ return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
+}
+
+static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
+ mi_assert_internal(arena_index < MI_MAX_ARENAS);
+ return (int)arena_index + 1;
+}
+
+mi_arena_id_t _mi_arena_id_none(void) {
+ return 0;
+}
+
+static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
+ return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
+ (arena_id == req_arena_id));
+}
+
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
+ if (memid.memkind == MI_MEM_ARENA) {
+ return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
+ }
+ else {
+ return mi_arena_id_is_suitable(_mi_arena_id_none(), false, request_arena_id);
+ }
+}
+
+bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
+ return (memid.memkind == MI_MEM_OS);
+}
+
+size_t mi_arena_get_count(void) {
+ return mi_atomic_load_relaxed(&mi_arena_count);
+}
+
+mi_arena_t* mi_arena_from_index(size_t idx) {
+ mi_assert_internal(idx < mi_arena_get_count());
+ return mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[idx]);
+}
+
+
+/* -----------------------------------------------------------
+ Arena allocations get a (currently) 16-bit memory id where the
+ lower 8 bits are the arena id, and the upper bits the block index.
+----------------------------------------------------------- */
+
+static size_t mi_block_count_of_size(size_t size) {
+ return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
+}
+
+static size_t mi_arena_block_size(size_t bcount) {
+ return (bcount * MI_ARENA_BLOCK_SIZE);
+}
+
+static size_t mi_arena_size(mi_arena_t* arena) {
+ return mi_arena_block_size(arena->block_count);
+}
+
+static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
+ memid.mem.arena.id = id;
+ memid.mem.arena.block_index = bitmap_index;
+ memid.mem.arena.is_exclusive = is_exclusive;
+ return memid;
+}
+
+bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+ mi_assert_internal(memid.memkind == MI_MEM_ARENA);
+ *arena_index = mi_arena_id_index(memid.mem.arena.id);
+ *bitmap_index = memid.mem.arena.block_index;
+ return memid.mem.arena.is_exclusive;
+}
+
+
+
+/* -----------------------------------------------------------
+ Special static area for mimalloc internal structures
+ to avoid OS calls (for example, for the arena metadata (~= 256b))
+----------------------------------------------------------- */
+
+#define MI_ARENA_STATIC_MAX ((MI_INTPTR_SIZE/2)*MI_KiB) // 4 KiB on 64-bit
+
+static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX]; // must be cache aligned, see issue #895
+static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top;
+
+static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
+ if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
+ const size_t toplow = mi_atomic_load_relaxed(&mi_arena_static_top);
+ if ((toplow + size) > MI_ARENA_STATIC_MAX) return NULL;
+
+ // try to claim space
+ if (alignment < MI_MAX_ALIGN_SIZE) { alignment = MI_MAX_ALIGN_SIZE; }
+ const size_t oversize = size + alignment - 1;
+ if (toplow + oversize > MI_ARENA_STATIC_MAX) return NULL;
+ const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
+ size_t top = oldtop + oversize;
+ if (top > MI_ARENA_STATIC_MAX) {
+ // try to roll back, ok if this fails
+ mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
+ return NULL;
+ }
+
+ // success
+ *memid = _mi_memid_create(MI_MEM_STATIC);
+ memid->initially_zero = true;
+ const size_t start = _mi_align_up(oldtop, alignment);
+ uint8_t* const p = &mi_arena_static[start];
+ _mi_memzero_aligned(p, size);
+ return p;
+}
+
+void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
+
+ // try static
+ void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid);
+ if (p != NULL) return p;
+
+ // or fall back to the OS
+ p = _mi_os_zalloc(size, memid);
+ if (p == NULL) return NULL;
+
+ return p;
+}
+
+void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
+ if (mi_memkind_is_os(memid.memkind)) {
+ _mi_os_free(p, size, memid);
+ }
+ else {
+ mi_assert(memid.memkind == MI_MEM_STATIC);
+ }
+}
+
+void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
+ return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
+}
+
+
+/* -----------------------------------------------------------
+ Thread safe allocation in an arena
+----------------------------------------------------------- */
+
+// claim the `blocks_inuse` bits
+static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
+{
+ size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
+ if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
+ mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
+ return true;
+ };
+ return false;
+}
+
+
+/* -----------------------------------------------------------
+ Arena Allocation
+----------------------------------------------------------- */
+
+static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
+ bool commit, mi_memid_t* memid)
+{
+ MI_UNUSED(arena_index);
+ mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
+
+ mi_bitmap_index_t bitmap_index;
+ if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
+
+ // claimed it!
+ void* p = mi_arena_block_start(arena, bitmap_index);
+ *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
+ memid->is_pinned = arena->memid.is_pinned;
+
+ // none of the claimed blocks should be scheduled for a decommit
+ if (arena->blocks_purge != NULL) {
+ // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
+ _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
+ }
+
+ // set the dirty bits (todo: no need for an atomic op here?)
+ if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
+ memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL, NULL);
+ }
+
+ // set commit state
+ if (arena->blocks_committed == NULL) {
+ // always committed
+ memid->initially_committed = true;
+ }
+ else if (commit) {
+ // commit requested, but the range may not be committed as a whole: ensure it is committed now
+ memid->initially_committed = true;
+ const size_t commit_size = mi_arena_block_size(needed_bcount);
+ bool any_uncommitted;
+ size_t already_committed = 0;
+ _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed);
+ if (any_uncommitted) {
+ mi_assert_internal(already_committed < needed_bcount);
+ const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed);
+ bool commit_zero = false;
+ if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) {
+ memid->initially_committed = false;
+ }
+ else {
+ if (commit_zero) { memid->initially_zero = true; }
+ }
+ }
+ else {
+ // all are already committed: signal that we are reusing memory in case it was purged before
+ _mi_os_reuse( p, commit_size );
+ }
+ }
+ else {
+ // no need to commit, but check if already fully committed
+ size_t already_committed = 0;
+ memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &already_committed);
+ if (!memid->initially_committed && already_committed > 0) {
+ // partially committed: as it will be committed at some time, adjust the stats and pretend the range is fully uncommitted.
+ mi_assert_internal(already_committed < needed_bcount);
+ _mi_stat_decrease(&_mi_stats_main.committed, mi_arena_block_size(already_committed));
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+ }
+ }
+
+ return p;
+}
+
+// allocate in a specific arena
+static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
+ bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid )
+{
+ MI_UNUSED_RELEASE(alignment);
+ mi_assert(alignment <= MI_SEGMENT_ALIGN);
+ const size_t bcount = mi_block_count_of_size(size);
+ const size_t arena_index = mi_arena_id_index(arena_id);
+ mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
+ mi_assert_internal(size <= mi_arena_block_size(bcount));
+
+ // Check arena suitability
+ mi_arena_t* arena = mi_arena_from_index(arena_index);
+ if (arena == NULL) return NULL;
+ if (!allow_large && arena->is_large) return NULL;
+ if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
+ if (req_arena_id == _mi_arena_id_none()) { // in not specific, check numa affinity
+ const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
+ if (match_numa_node) { if (!numa_suitable) return NULL; }
+ else { if (numa_suitable) return NULL; }
+ }
+
+ // try to allocate
+ void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid);
+ mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
+ return p;
+}
+
+
+// allocate from an arena with fallback to the OS
+static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
+ bool commit, bool allow_large,
+ mi_arena_id_t req_arena_id, mi_memid_t* memid )
+{
+ MI_UNUSED(alignment);
+ mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ if mi_likely(max_arena == 0) return NULL;
+
+ if (req_arena_id != _mi_arena_id_none()) {
+ // try a specific arena if requested
+ if (mi_arena_id_index(req_arena_id) < max_arena) {
+ void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+ }
+ else {
+ // try numa affine allocation
+ for (size_t i = 0; i < max_arena; i++) {
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+
+ // try from another numa node instead..
+ if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arena's have been tried already
+ for (size_t i = 0; i < max_arena; i++) {
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+ }
+ }
+ return NULL;
+}
+
+// try to reserve a fresh arena space
+static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t *arena_id)
+{
+ if (_mi_preloading()) return false; // use OS only while pre loading
+
+ const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
+ if (arena_count > (MI_MAX_ARENAS - 4)) return false;
+
+ size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
+ if (arena_reserve == 0) return false;
+
+ if (!_mi_os_has_virtual_reserve()) {
+ arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for WASM for example)
+ }
+ arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
+ arena_reserve = _mi_align_up(arena_reserve, MI_SEGMENT_SIZE);
+ if (arena_count >= 8 && arena_count <= 128) {
+ // scale up the arena sizes exponentially every 8 entries (128 entries get to 589TiB)
+ const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/8, 0, 16 );
+ size_t reserve = 0;
+ if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) {
+ arena_reserve = reserve;
+ }
+ }
+ if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size
+
+ // commit eagerly?
+ bool arena_commit = false;
+ if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); }
+ else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
+
+ return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id) == 0);
+}
+
+
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
+ mi_arena_id_t req_arena_id, mi_memid_t* memid)
+{
+ mi_assert_internal(memid != NULL);
+ mi_assert_internal(size > 0);
+ *memid = _mi_memid_none();
+
+ const int numa_node = _mi_os_numa_node(); // current numa node
+
+ // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
+ if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) { // is arena allocation allowed?
+ if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
+ {
+ void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+
+ // otherwise, try to first eagerly reserve a new arena
+ if (req_arena_id == _mi_arena_id_none()) {
+ mi_arena_id_t arena_id = 0;
+ if (mi_arena_reserve(size, allow_large, &arena_id)) {
+ // and try allocate in there
+ mi_assert_internal(req_arena_id == _mi_arena_id_none());
+ p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+ }
+ }
+ }
+
+ // if we cannot use OS allocation, return NULL
+ if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ // finally, fall back to the OS
+ if (align_offset > 0) {
+ return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid);
+ }
+ else {
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
+ }
+}
+
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid)
+{
+ return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid);
+}
+
+
+void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
+ if (size != NULL) *size = 0;
+ size_t arena_index = mi_arena_id_index(arena_id);
+ if (arena_index >= MI_MAX_ARENAS) return NULL;
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
+ if (arena == NULL) return NULL;
+ if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
+ return arena->start;
+}
+
+
+/* -----------------------------------------------------------
+ Arena purge
+----------------------------------------------------------- */
+
+static long mi_arena_purge_delay(void) {
+ // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay
+ return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
+}
+
+// reset or decommit in an arena and update the committed/decommit bitmaps
+// assumes we own the area (i.e. blocks_in_use is claimed by us)
+static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
+ mi_assert_internal(arena->blocks_committed != NULL);
+ mi_assert_internal(arena->blocks_purge != NULL);
+ mi_assert_internal(!arena->memid.is_pinned);
+ const size_t size = mi_arena_block_size(blocks);
+ void* const p = mi_arena_block_start(arena, bitmap_idx);
+ bool needs_recommit;
+ size_t already_committed = 0;
+ if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx, &already_committed)) {
+ // all blocks are committed, we can purge freely
+ mi_assert_internal(already_committed == blocks);
+ needs_recommit = _mi_os_purge(p, size);
+ }
+ else {
+ // some blocks are not committed -- this can happen when a partially committed block is freed
+ // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
+ // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
+ mi_assert_internal(already_committed < blocks);
+ mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
+ needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
+ }
+
+ // clear the purged blocks
+ _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
+ // update committed bitmap
+ if (needs_recommit) {
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+ }
+}
+
+// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
+// Note: assumes we (still) own the area as we may purge immediately
+static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
+ mi_assert_internal(arena->blocks_purge != NULL);
+ const long delay = mi_arena_purge_delay();
+ if (delay < 0) return; // is purging allowed at all?
+
+ if (_mi_preloading() || delay == 0) {
+ // decommit directly
+ mi_arena_purge(arena, bitmap_idx, blocks);
+ }
+ else {
+ // schedule purge
+ const mi_msecs_t expire = _mi_clock_now() + delay;
+ mi_msecs_t expire0 = 0;
+ if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) {
+ // expiration was not yet set
+ // maybe set the global arenas expire as well (if it wasn't set already)
+ mi_atomic_casi64_strong_acq_rel(&mi_arenas_purge_expire, &expire0, expire);
+ }
+ else {
+ // already an expiration was set
+ }
+ _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL, NULL);
+ }
+}
+
+// purge a range of blocks
+// return true if the full range was purged.
+// assumes we own the area (i.e. blocks_in_use is claimed by us)
+static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge) {
+ const size_t endidx = startidx + bitlen;
+ size_t bitidx = startidx;
+ bool all_purged = false;
+ while (bitidx < endidx) {
+ // count consecutive ones in the purge mask
+ size_t count = 0;
+ while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
+ count++;
+ }
+ if (count > 0) {
+ // found range to be purged
+ const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
+ mi_arena_purge(arena, range_idx, count);
+ if (count == bitlen) {
+ all_purged = true;
+ }
+ }
+ bitidx += (count+1); // +1 to skip the zero bit (or end)
+ }
+ return all_purged;
+}
+
+// returns true if anything was purged
+static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
+{
+ // check pre-conditions
+ if (arena->memid.is_pinned) return false;
+
+ // expired yet?
+ mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
+ if (!force && (expire == 0 || expire > now)) return false;
+
+ // reset expire (if not already set concurrently)
+ mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);
+ _mi_stat_counter_increase(&_mi_stats_main.arena_purges, 1);
+
+ // potential purges scheduled, walk through the bitmap
+ bool any_purged = false;
+ bool full_purge = true;
+ for (size_t i = 0; i < arena->field_count; i++) {
+ size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
+ if (purge != 0) {
+ size_t bitidx = 0;
+ while (bitidx < MI_BITMAP_FIELD_BITS) {
+ // find consecutive range of ones in the purge mask
+ size_t bitlen = 0;
+ while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
+ bitlen++;
+ }
+ // temporarily claim the purge range as "in-use" to be thread-safe with allocation
+ // try to claim the longest range of corresponding in_use bits
+ const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
+ while( bitlen > 0 ) {
+ if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
+ break;
+ }
+ bitlen--;
+ }
+ // actual claimed bits at `in_use`
+ if (bitlen > 0) {
+ // read purge again now that we have the in_use bits
+ purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
+ if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge)) {
+ full_purge = false;
+ }
+ any_purged = true;
+ // release the claimed `in_use` bits again
+ _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
+ }
+ bitidx += (bitlen+1); // +1 to skip the zero (or end)
+ } // while bitidx
+ } // purge != 0
+ }
+ // if not fully purged, make sure to purge again in the future
+ if (!full_purge) {
+ const long delay = mi_arena_purge_delay();
+ mi_msecs_t expected = 0;
+ mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay);
+ }
+ return any_purged;
+}
+
+static void mi_arenas_try_purge( bool force, bool visit_all )
+{
+ if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
+
+ // check if any arena needs purging?
+ const mi_msecs_t now = _mi_clock_now();
+ mi_msecs_t arenas_expire = mi_atomic_loadi64_acquire(&mi_arenas_purge_expire);
+ if (!force && (arenas_expire == 0 || arenas_expire < now)) return;
+
+ const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
+ if (max_arena == 0) return;
+
+ // allow only one thread to purge at a time
+ static mi_atomic_guard_t purge_guard;
+ mi_atomic_guard(&purge_guard)
+ {
+ // increase global expire: at most one purge per delay cycle
+ mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
+ size_t max_purge_count = (visit_all ? max_arena : 2);
+ bool all_visited = true;
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
+ if (arena != NULL) {
+ if (mi_arena_try_purge(arena, now, force)) {
+ if (max_purge_count <= 1) {
+ all_visited = false;
+ break;
+ }
+ max_purge_count--;
+ }
+ }
+ }
+ if (all_visited) {
+ // all arenas were visited and purged: reset global expire
+ mi_atomic_storei64_release(&mi_arenas_purge_expire, 0);
+ }
+ }
+}
+
+
+/* -----------------------------------------------------------
+ Arena free
+----------------------------------------------------------- */
+
+void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid) {
+ mi_assert_internal(size > 0);
+ mi_assert_internal(committed_size <= size);
+ if (p==NULL) return;
+ if (size==0) return;
+ const bool all_committed = (committed_size == size);
+ const size_t decommitted_size = (committed_size <= size ? size - committed_size : 0);
+
+ // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
+ mi_track_mem_undefined(p,size);
+
+ if (mi_memkind_is_os(memid.memkind)) {
+ // was a direct OS allocation, pass through
+ if (!all_committed && decommitted_size > 0) {
+ // if partially committed, adjust the committed stats (as `_mi_os_free` will decrease commit by the full size)
+ _mi_stat_increase(&_mi_stats_main.committed, decommitted_size);
+ }
+ _mi_os_free(p, size, memid);
+ }
+ else if (memid.memkind == MI_MEM_ARENA) {
+ // allocated in an arena
+ size_t arena_idx;
+ size_t bitmap_idx;
+ mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
+ mi_assert_internal(arena_idx < MI_MAX_ARENAS);
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
+ mi_assert_internal(arena != NULL);
+ const size_t blocks = mi_block_count_of_size(size);
+
+ // checks
+ if (arena == NULL) {
+ _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+ return;
+ }
+ mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
+ if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
+ _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+ return;
+ }
+
+ // potentially decommit
+ if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
+ mi_assert_internal(all_committed);
+ }
+ else {
+ mi_assert_internal(arena->blocks_committed != NULL);
+ mi_assert_internal(arena->blocks_purge != NULL);
+
+ if (!all_committed) {
+ // mark the entire range as no longer committed (so we will recommit the full range when re-using)
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+ mi_track_mem_noaccess(p,size);
+ //if (committed_size > 0) {
+ // if partially committed, adjust the committed stats (as it will be recommitted when re-using)
+ // in the delayed purge, we do no longer decrease the commit if the range is not marked entirely as committed.
+ _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
+ //}
+ // note: if not all committed, it may be that the purge will reset/decommit the entire range
+ // that contains already decommitted parts. Since purge consistently uses reset or decommit that
+ // works (as we should never reset decommitted parts).
+ }
+ // (delay) purge the entire range
+ mi_arena_schedule_purge(arena, bitmap_idx, blocks);
+ }
+
+ // and make it available to others again
+ bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
+ if (!all_inuse) {
+ _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
+ return;
+ };
+ }
+ else {
+ // arena was none, external, or static; nothing to do
+ mi_assert_internal(memid.memkind < MI_MEM_OS);
+ }
+
+ // purge expired decommits
+ mi_arenas_try_purge(false, false);
+}
+
+// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
+// for dynamic libraries that are unloaded and need to release all their allocated memory.
+static void mi_arenas_unsafe_destroy(void) {
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ size_t new_max_arena = 0;
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
+ if (arena != NULL) {
+ mi_lock_done(&arena->abandoned_visit_lock);
+ if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
+ mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
+ _mi_os_free(arena->start, mi_arena_size(arena), arena->memid);
+ }
+ else {
+ new_max_arena = i;
+ }
+ _mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size);
+ }
+ }
+
+ // try to lower the max arena.
+ size_t expected = max_arena;
+ mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
+}
+
+// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
+void _mi_arenas_collect(bool force_purge) {
+ mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
+}
+
+// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
+// for dynamic libraries that are unloaded and need to release all their allocated memory.
+void _mi_arena_unsafe_destroy_all(void) {
+ mi_arenas_unsafe_destroy();
+ _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
+}
+
+// Is a pointer inside any of our arenas?
+bool _mi_arena_contains(const void* p) {
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+ if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/* -----------------------------------------------------------
+ Add an arena.
+----------------------------------------------------------- */
+
+static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id, mi_stats_t* stats) {
+ mi_assert_internal(arena != NULL);
+ mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
+ mi_assert_internal(arena->block_count > 0);
+ if (arena_id != NULL) { *arena_id = -1; }
+
+ size_t i = mi_atomic_load_relaxed(&mi_arena_count);
+ while (i < MI_MAX_ARENAS) {
+ if (mi_atomic_cas_strong_acq_rel(&mi_arena_count, &i, i+1)) {
+ _mi_stat_counter_increase(&stats->arena_count, 1);
+ arena->id = mi_arena_id_create(i);
+ mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], arena);
+ if (arena_id != NULL) { *arena_id = arena->id; }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
+{
+ if (arena_id != NULL) *arena_id = _mi_arena_id_none();
+ if (size < MI_ARENA_BLOCK_SIZE) {
+ _mi_warning_message("the arena size is too small (memory at %p with size %zu)\n", start, size);
+ return false;
+ }
+ if (is_large) {
+ mi_assert_internal(memid.initially_committed && memid.is_pinned);
+ }
+ if (!_mi_is_aligned(start, MI_SEGMENT_ALIGN)) {
+ void* const aligned_start = mi_align_up_ptr(start, MI_SEGMENT_ALIGN);
+ const size_t diff = (uint8_t*)aligned_start - (uint8_t*)start;
+ if (diff >= size || (size - diff) < MI_ARENA_BLOCK_SIZE) {
+ _mi_warning_message("after alignment, the size of the arena becomes too small (memory at %p with size %zu)\n", start, size);
+ return false;
+ }
+ start = aligned_start;
+ size = size - diff;
+ }
+
+ const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
+ const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
+ const size_t bitmaps = (memid.is_pinned ? 3 : 5);
+ const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
+ mi_memid_t meta_memid;
+ mi_arena_t* arena = (mi_arena_t*)_mi_arena_meta_zalloc(asize, &meta_memid);
+ if (arena == NULL) return false;
+
+ // already zero'd due to zalloc
+ // _mi_memzero(arena, asize);
+ arena->id = _mi_arena_id_none();
+ arena->memid = memid;
+ arena->exclusive = exclusive;
+ arena->meta_size = asize;
+ arena->meta_memid = meta_memid;
+ arena->block_count = bcount;
+ arena->field_count = fields;
+ arena->start = (uint8_t*)start;
+ arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
+ arena->is_large = is_large;
+ arena->purge_expire = 0;
+ arena->search_idx = 0;
+ mi_lock_init(&arena->abandoned_visit_lock);
+ // consecutive bitmaps
+ arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
+ arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap
+ arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap
+ arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]); // just after committed bitmap
+ // initialize committed bitmap?
+ if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
+ memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
+ }
+
+ // and claim leftover blocks if needed (so we never allocate there)
+ ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
+ mi_assert_internal(post >= 0);
+ if (post > 0) {
+ // don't use leftover bits at the end
+ mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
+ _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
+ }
+ return mi_arena_add(arena, arena_id, &_mi_stats_main);
+
+}
+
+bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
+ memid.initially_committed = is_committed;
+ memid.initially_zero = is_zero;
+ memid.is_pinned = is_large;
+ return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
+}
+
+// Reserve a range of regular OS memory
+int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+ if (arena_id != NULL) *arena_id = _mi_arena_id_none();
+ size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
+ mi_memid_t memid;
+ void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid);
+ if (start == NULL) return ENOMEM;
+ const bool is_large = memid.is_pinned; // todo: use separate is_large field?
+ if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
+ _mi_os_free_ex(start, size, commit, memid);
+ _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
+ return ENOMEM;
+ }
+ _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
+ return 0;
+}
+
+
+// Manage a range of regular OS memory
+bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
+ return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
+}
+
+// Reserve a range of regular OS memory
+int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
+ return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
+}
+
+
+/* -----------------------------------------------------------
+ Debugging
+----------------------------------------------------------- */
+
+static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t block_count, mi_bitmap_field_t* fields, size_t field_count ) {
+ _mi_message("%s%s:\n", prefix, header);
+ size_t bcount = 0;
+ size_t inuse_count = 0;
+ for (size_t i = 0; i < field_count; i++) {
+ char buf[MI_BITMAP_FIELD_BITS + 1];
+ uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
+ for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++, bcount++) {
+ if (bcount < block_count) {
+ bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
+ if (inuse) inuse_count++;
+ buf[bit] = (inuse ? 'x' : '.');
+ }
+ else {
+ buf[bit] = ' ';
+ }
+ }
+ buf[MI_BITMAP_FIELD_BITS] = 0;
+ _mi_message("%s %s\n", prefix, buf);
+ }
+ _mi_message("%s total ('x'): %zu\n", prefix, inuse_count);
+ return inuse_count;
+}
+
+void mi_debug_show_arenas(void) mi_attr_noexcept {
+ const bool show_inuse = true;
+ size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
+ size_t inuse_total = 0;
+ //size_t abandoned_total = 0;
+ //size_t purge_total = 0;
+ for (size_t i = 0; i < max_arenas; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+ if (arena == NULL) break;
+ _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, (size_t)(MI_ARENA_BLOCK_SIZE / MI_MiB), arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
+ if (show_inuse) {
+ inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
+ }
+ if (arena->blocks_committed != NULL) {
+ mi_debug_show_bitmap(" ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count);
+ }
+ //if (show_abandoned) {
+ // abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count);
+ //}
+ //if (show_purge && arena->blocks_purge != NULL) {
+ // purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count);
+ //}
+ }
+ if (show_inuse) _mi_message("total inuse blocks : %zu\n", inuse_total);
+ //if (show_abandoned) _mi_message("total abandoned blocks: %zu\n", abandoned_total);
+ //if (show_purge) _mi_message("total purgeable blocks: %zu\n", purge_total);
+}
+
+
+void mi_arenas_print(void) mi_attr_noexcept {
+ mi_debug_show_arenas();
+}
+
+
+/* -----------------------------------------------------------
+ Reserve a huge page arena.
+----------------------------------------------------------- */
+// reserve at a specific numa node
+int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+ if (arena_id != NULL) *arena_id = -1;
+ if (pages==0) return 0;
+ if (numa_node < -1) numa_node = -1;
+ if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
+ size_t hsize = 0;
+ size_t pages_reserved = 0;
+ mi_memid_t memid;
+ void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
+ if (p==NULL || pages_reserved==0) {
+ _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
+ return ENOMEM;
+ }
+ _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
+
+ if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
+ _mi_os_free(p, hsize, memid);
+ return ENOMEM;
+ }
+ return 0;
+}
+
+int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+ return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
+}
+
+// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
+int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
+ if (pages == 0) return 0;
+
+ // pages per numa node
+ int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? (int)numa_nodes : _mi_os_numa_node_count());
+ if (numa_count == 0) numa_count = 1;
+ const size_t pages_per = pages / numa_count;
+ const size_t pages_mod = pages % numa_count;
+ const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);
+
+ // reserve evenly among numa nodes
+ for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
+ size_t node_pages = pages_per; // can be 0
+ if ((size_t)numa_node < pages_mod) node_pages++;
+ int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per);
+ if (err) return err;
+ if (pages < node_pages) {
+ pages = 0;
+ }
+ else {
+ pages -= node_pages;
+ }
+ }
+
+ return 0;
+}
+
+int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
+ MI_UNUSED(max_secs);
+ _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
+ if (pages_reserved != NULL) *pages_reserved = 0;
+ int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
+ if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
+ return err;
+}
diff --git a/compat/mimalloc/bitmap.c b/compat/mimalloc/bitmap.c
new file mode 100644
index 00000000000000..32d1e9548d3e3b
--- /dev/null
+++ b/compat/mimalloc/bitmap.c
@@ -0,0 +1,441 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* ----------------------------------------------------------------------------
+Concurrent bitmap that can set/reset sequences of bits atomically,
+represented as an array of fields where each field is a machine word (`size_t`)
+
+There are two api's; the standard one cannot have sequences that cross
+between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
+
+The `_across` postfixed functions do allow sequences that can cross over
+between the fields. (This is used in arena allocation)
+---------------------------------------------------------------------------- */
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "bitmap.h"
+
+/* -----------------------------------------------------------
+ Bitmap definition
+----------------------------------------------------------- */
+
+// Return the mask with `count` one-bits starting at bit position `bitidx`.
+static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
+  mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS);
+  mi_assert_internal(count > 0);
+  // shifting a size_t by its full width is undefined behavior, so the
+  // full-field mask (and the degenerate empty mask) are handled explicitly
+  if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL;
+  if (count == 0) return 0;
+  return ((((size_t)1 << count) - 1) << bitidx);
+}
+
+
+/* -----------------------------------------------------------
+ Claim a bit sequence atomically
+----------------------------------------------------------- */
+
+// Try to atomically claim a sequence of `count` bits in a single
+// field at `idx` in `bitmap`. Returns `true` on success.
+// Lock-free: scans the field for a run of `count` zero bits and claims it
+// with a compare-and-swap; on CAS failure the scan continues with the
+// freshly observed field value.
+inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
+{
+ mi_assert_internal(bitmap_idx != NULL);
+ mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
+ mi_assert_internal(count > 0);
+ mi_bitmap_field_t* field = &bitmap[idx];
+ size_t map = mi_atomic_load_relaxed(field);
+ if (map==MI_BITMAP_FIELD_FULL) return false; // short cut
+
+ // search for 0-bit sequence of length count
+ const size_t mask = mi_bitmap_mask_(count, 0);
+ const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count;
+
+#ifdef MI_HAVE_FAST_BITSCAN
+ size_t bitidx = mi_ctz(~map); // quickly find the first zero bit if possible
+#else
+ size_t bitidx = 0; // otherwise start at 0
+#endif
+ size_t m = (mask << bitidx); // invariant: m == mask shifted by bitidx
+
+ // scan linearly for a free range of zero bits
+ while (bitidx <= bitidx_max) {
+ const size_t mapm = (map & m);
+ if (mapm == 0) { // are the mask bits free at bitidx?
+ mi_assert_internal((m >> bitidx) == mask); // no overflow?
+ const size_t newmap = (map | m);
+ mi_assert_internal((newmap^map) >> bitidx == mask);
+ if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { // TODO: use weak cas here?
+ // no success, another thread claimed concurrently.. keep going (with updated `map`)
+ // (note: `bitidx`/`m` are NOT advanced here, so the same window is re-probed
+ // against the updated `map`)
+ continue;
+ }
+ else {
+ // success, we claimed the bits!
+ *bitmap_idx = mi_bitmap_index_create(idx, bitidx);
+ return true;
+ }
+ }
+ else {
+ // on to the next bit range
+#ifdef MI_HAVE_FAST_BITSCAN
+ // jump past the highest set bit inside the probed window (mapm != 0 here)
+ mi_assert_internal(mapm != 0);
+ const size_t shift = (count == 1 ? 1 : (MI_SIZE_BITS - mi_clz(mapm) - bitidx));
+ mi_assert_internal(shift > 0 && shift <= count);
+#else
+ const size_t shift = 1;
+#endif
+ bitidx += shift;
+ m <<= shift;
+ }
+ }
+ // no bits found
+ return false;
+}
+
+// Find `count` consecutive zero bits and set them to 1 atomically;
+// returns `true` on success. The search begins at field `start_field_idx`
+// and wraps around over all `bitmap_fields` fields.
+// `count` can be at most MI_BITMAP_FIELD_BITS and never crosses fields.
+bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
+  size_t field_idx = start_field_idx;
+  for (size_t tries = 0; tries < bitmap_fields; tries++, field_idx++) {
+    if (field_idx >= bitmap_fields) { field_idx = 0; }  // wrap around
+    if (_mi_bitmap_try_find_claim_field(bitmap, field_idx, count, bitmap_idx)) return true;
+  }
+  return false;  // no free range found in any field
+}
+
+// Like `_mi_bitmap_try_find_from_claim` but with an extra predicate that must
+// be fulfilled; a claimed range that the predicate rejects is released again
+// and the search continues.
+bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
+  const size_t start_field_idx, const size_t count,
+  mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
+  mi_bitmap_index_t* bitmap_idx) {
+  size_t field_idx = start_field_idx;
+  for (size_t tries = 0; tries < bitmap_fields; tries++, field_idx++) {
+    if (field_idx >= bitmap_fields) { field_idx = 0; }  // wrap around
+    if (_mi_bitmap_try_find_claim_field(bitmap, field_idx, count, bitmap_idx)) {
+      // a NULL predicate accepts everything
+      if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) return true;
+      // predicate rejected the range: release the bits and keep searching
+      _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx);
+    }
+  }
+  return false;
+}
+
+// Atomically clear `count` bits at `bitmap_idx`.
+// Returns `true` when all of those bits were set (1) beforehand.
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+  const size_t field_idx = mi_bitmap_index_field(bitmap_idx);
+  const size_t bit_idx   = mi_bitmap_index_bit_in_field(bitmap_idx);
+  const size_t mask      = mi_bitmap_mask_(count, bit_idx);
+  mi_assert_internal(bitmap_fields > field_idx); MI_UNUSED(bitmap_fields);
+  // a single atomic and-not clears the whole range at once
+  const size_t old_map = mi_atomic_and_acq_rel(&bitmap[field_idx], ~mask);
+  return ((old_map & mask) == mask);  // were all bits previously 1?
+}
+
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+ const size_t mask = mi_bitmap_mask_(count, bitidx);
+ mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
+ //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0);
+ size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask);
+ if (any_zero != NULL) { *any_zero = ((prev & mask) != mask); }
+ return ((prev & mask) == 0);
+}
+
+// Returns `true` when all `count` bits at `bitmap_idx` are set; when
+// `any_ones` is non-NULL it receives whether at least one bit is set.
+static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) {
+  const size_t field_idx = mi_bitmap_index_field(bitmap_idx);
+  const size_t bit_idx   = mi_bitmap_index_bit_in_field(bitmap_idx);
+  const size_t mask      = mi_bitmap_mask_(count, bit_idx);
+  mi_assert_internal(bitmap_fields > field_idx); MI_UNUSED(bitmap_fields);
+  // a single relaxed load suffices: this is a query, not a claim
+  const size_t map = mi_atomic_load_relaxed(&bitmap[field_idx]);
+  if (any_ones != NULL) { *any_ones = ((map & mask) != 0); }
+  return ((map & mask) == mask);
+}
+
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Returns `true` if successful when all previous `count` bits were 0.
+// Unlike `_mi_bitmap_claim` this fails (without modifying the bitmap)
+// as soon as any bit in the range is already set.
+bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+ const size_t mask = mi_bitmap_mask_(count, bitidx);
+ mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
+ size_t expected = mi_atomic_load_relaxed(&bitmap[idx]);
+ do {
+ // bail out if any bit in the range is already claimed; `expected` is
+ // refreshed by a failed CAS below, so this re-checks the latest value
+ if ((expected & mask) != 0) return false;
+ }
+ while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask));
+ mi_assert_internal((expected & mask) == 0);
+ return true;
+}
+
+
+// Returns `true` if all `count` bits at `bitmap_idx` are set (1).
+bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL);
+}
+
+// Returns `true` if at least one of the `count` bits at `bitmap_idx` is set.
+bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+  bool any_set = false;  // always written by the query below
+  mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_set);
+  return any_set;
+}
+
+
+//--------------------------------------------------------------------------
+// the `_across` functions work on bitmaps where sequences can cross over
+// between the fields. This is used in arena allocation
+//--------------------------------------------------------------------------
+
+// Try to atomically claim a sequence of `count` bits starting from the field
+// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
+// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`)
+// Strategy: (1) a read-only scan verifies the whole range is free, then
+// (2) the fields are claimed one by one with CAS; on any failure all
+// already-claimed fields are rolled back and the attempt is retried
+// (at most 3 recursive retries).
+static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
+{
+ mi_assert_internal(bitmap_idx != NULL);
+
+ // check initial trailing zeros
+ mi_bitmap_field_t* field = &bitmap[idx];
+ size_t map = mi_atomic_load_relaxed(field);
+ const size_t initial = mi_clz(map); // count of initial zeros starting at idx
+ // (the claimed range starts at bit `MI_BITMAP_FIELD_BITS - initial` of this
+ // field, i.e. at its high end — see `initial_idx` below)
+ mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS);
+ if (initial == 0) return false;
+ if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us)
+ if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries
+
+ // scan ahead (read-only; no bits are modified during this phase)
+ size_t found = initial;
+ size_t mask = 0; // mask bits for the final field
+ while(found < count) {
+ field++;
+ map = mi_atomic_load_relaxed(field);
+ const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? MI_BITMAP_FIELD_BITS : (count - found));
+ mi_assert_internal(mask_bits > 0 && mask_bits <= MI_BITMAP_FIELD_BITS);
+ mask = mi_bitmap_mask_(mask_bits, 0);
+ if ((map & mask) != 0) return false; // some part is already claimed
+ found += mask_bits;
+ }
+ mi_assert_internal(field < &bitmap[bitmap_fields]);
+
+ // we found a range of contiguous zeros up to the final field; mask contains mask in the final field
+ // now try to claim the range atomically
+ mi_bitmap_field_t* const final_field = field;
+ const size_t final_mask = mask;
+ mi_bitmap_field_t* const initial_field = &bitmap[idx];
+ const size_t initial_idx = MI_BITMAP_FIELD_BITS - initial;
+ const size_t initial_mask = mi_bitmap_mask_(initial, initial_idx);
+
+ // initial field: claim the top `initial` bits via CAS loop
+ size_t newmap;
+ field = initial_field;
+ map = mi_atomic_load_relaxed(field);
+ do {
+ newmap = (map | initial_mask);
+ if ((map & initial_mask) != 0) { goto rollback; };
+ } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
+
+ // intermediate fields: must transition from fully empty to fully full
+ while (++field < final_field) {
+ newmap = MI_BITMAP_FIELD_FULL;
+ map = 0;
+ if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
+ }
+
+ // final field: claim the low `final_mask` bits via CAS loop
+ mi_assert_internal(field == final_field);
+ map = mi_atomic_load_relaxed(field);
+ do {
+ newmap = (map | final_mask);
+ if ((map & final_mask) != 0) { goto rollback; }
+ } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
+
+ // claimed!
+ *bitmap_idx = mi_bitmap_index_create(idx, initial_idx);
+ return true;
+
+rollback:
+ // roll back intermediate fields
+ // (we just failed to claim `field` so decrement first)
+ while (--field > initial_field) {
+ newmap = 0;
+ map = MI_BITMAP_FIELD_FULL;
+ mi_assert_internal(mi_atomic_load_relaxed(field) == map);
+ mi_atomic_store_release(field, newmap);
+ }
+ if (field == initial_field) { // (if we failed on the initial field, `field + 1 == initial_field`)
+ map = mi_atomic_load_relaxed(field);
+ do {
+ mi_assert_internal((map & initial_mask) == initial_mask);
+ newmap = (map & ~initial_mask);
+ } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
+ }
+ mi_stat_counter_increase(_mi_stats_main.arena_rollback_count,1);
+ // retry? (we make a recursive call instead of goto to be able to use const declarations)
+ if (retries <= 2) {
+ return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx);
+ }
+ else {
+ return false;
+ }
+}
+
+
+// Find `count` zero bits (the range may span multiple fields) and set them
+// to 1 atomically; returns `true` on success. The search starts at field
+// `start_field_idx` and wraps around over all `bitmap_fields` fields.
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
+  mi_assert_internal(count > 0);
+
+  // small ranges never need to cross a field boundary: use the simple search
+  if (count <= 2) {
+    return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, start_field_idx, count, bitmap_idx);
+  }
+
+  // visit every field once, wrapping around at the end
+  // (a prior in-field-only fast path here is intentionally disabled upstream)
+  size_t field_idx = start_field_idx;
+  for (size_t visited = 0; visited < bitmap_fields; visited++, field_idx++) {
+    if (field_idx >= bitmap_fields) { field_idx = 0; }  // wrap
+    if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, field_idx, count, 0 /* retries */, bitmap_idx)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Compute the masks for a range that may span multiple fields: `pre_mask`
+// covers the bits in the first field, `mid_mask` the full fields in between,
+// and `post_mask` the bits in the last field (0 when the range ends on a
+// field boundary). Returns the number of full intermediate fields.
+static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
+  MI_UNUSED(bitmap_fields);
+  const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+  if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
+    // common case: the whole range fits in a single field
+    *pre_mask  = mi_bitmap_mask_(count, bitidx);
+    *mid_mask  = 0;
+    *post_mask = 0;
+    mi_assert_internal(mi_bitmap_index_field(bitmap_idx) < bitmap_fields);
+    return 0;
+  }
+  // the range crosses at least one field boundary
+  const size_t pre_bits = MI_BITMAP_FIELD_BITS - bitidx;  // bits in the first field
+  mi_assert_internal(pre_bits < count);
+  *pre_mask = mi_bitmap_mask_(pre_bits, bitidx);
+  size_t rest = count - pre_bits;                         // bits beyond the first field
+  const size_t mid_count = rest / MI_BITMAP_FIELD_BITS;   // full fields in between
+  *mid_mask = MI_BITMAP_FIELD_FULL;
+  rest %= MI_BITMAP_FIELD_BITS;                           // bits in the last field
+  *post_mask = (rest == 0 ? 0 : mi_bitmap_mask_(rest, 0));
+  mi_assert_internal(mi_bitmap_index_field(bitmap_idx) + mid_count + (rest == 0 ? 0 : 1) < bitmap_fields);
+  return mid_count;
+}
+
+// Atomically clear `count` bits at `bitmap_idx`, where the range may span
+// multiple fields. Returns `true` when every bit in the range was 1 before.
+bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+  size_t pre_mask, mid_mask, post_mask;
+  size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
+  bool all_one = true;
+  mi_bitmap_field_t* field = &bitmap[mi_bitmap_index_field(bitmap_idx)];
+  // first (possibly partial) field
+  size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask);
+  if ((prev & pre_mask) != pre_mask) { all_one = false; }
+  // full intermediate fields
+  for (; mid_count > 0; mid_count--) {
+    prev = mi_atomic_and_acq_rel(field++, ~mid_mask);
+    if ((prev & mid_mask) != mid_mask) { all_one = false; }
+  }
+  // trailing partial field, if any
+  if (post_mask != 0) {
+    prev = mi_atomic_and_acq_rel(field, ~post_mask);
+    if ((prev & post_mask) != post_mask) { all_one = false; }
+  }
+  return all_one;
+}
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+// The range may span multiple fields; `already_set` (when non-NULL) receives
+// the number of bits in the range that were already 1.
+bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ size_t pre_mask;
+ size_t mid_mask;
+ size_t post_mask;
+ size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
+ bool all_zero = true;
+ bool any_zero = false;
+ size_t one_count = 0;
+ _Atomic(size_t)*field = &bitmap[idx];
+ // first (possibly partial) field
+ size_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
+ if ((prev & pre_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & pre_mask); }
+ if ((prev & pre_mask) != pre_mask) any_zero = true;
+ // full intermediate fields
+ while (mid_count-- > 0) {
+ prev = mi_atomic_or_acq_rel(field++, mid_mask);
+ if ((prev & mid_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & mid_mask); }
+ if ((prev & mid_mask) != mid_mask) any_zero = true;
+ }
+ // trailing partial field, if any
+ if (post_mask!=0) {
+ prev = mi_atomic_or_acq_rel(field, post_mask);
+ if ((prev & post_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & post_mask); }
+ if ((prev & post_mask) != post_mask) any_zero = true;
+ }
+ if (pany_zero != NULL) { *pany_zero = any_zero; }
+ if (already_set != NULL) { *already_set = one_count; };
+ mi_assert_internal(all_zero ? one_count == 0 : one_count <= count);
+ return all_zero;
+}
+
+
+// Returns `true` if all `count` bits were 1.
+// `any_ones` is `true` if there was at least one bit set to one.
+// The range may span multiple fields; `already_set` (when non-NULL) receives
+// the number of bits in the range that are 1. Query only: the bitmap is not
+// modified (relaxed loads per field).
+static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones, size_t* already_set) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ size_t pre_mask;
+ size_t mid_mask;
+ size_t post_mask;
+ size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
+ bool all_ones = true;
+ bool any_ones = false;
+ size_t one_count = 0;
+ mi_bitmap_field_t* field = &bitmap[idx];
+ // first (possibly partial) field
+ size_t prev = mi_atomic_load_relaxed(field++);
+ if ((prev & pre_mask) != pre_mask) all_ones = false;
+ if ((prev & pre_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & pre_mask); }
+ // full intermediate fields
+ while (mid_count-- > 0) {
+ prev = mi_atomic_load_relaxed(field++);
+ if ((prev & mid_mask) != mid_mask) all_ones = false;
+ if ((prev & mid_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & mid_mask); }
+ }
+ // trailing partial field, if any
+ if (post_mask!=0) {
+ prev = mi_atomic_load_relaxed(field);
+ if ((prev & post_mask) != post_mask) all_ones = false;
+ if ((prev & post_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & post_mask); }
+ }
+ if (pany_ones != NULL) { *pany_ones = any_ones; }
+ if (already_set != NULL) { *already_set = one_count; }
+ mi_assert_internal(all_ones ? one_count == count : one_count < count);
+ return all_ones;
+}
+
+// Returns `true` if all `count` bits (possibly spanning fields) are 1;
+// `already_set` (when non-NULL) receives the number of bits that are 1.
+bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set) {
+ return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL, already_set);
+}
+
+// Returns `true` if at least one bit in the (possibly cross-field) range is set.
+bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+  bool any_set = false;  // always written by the query below
+  mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_set, NULL);
+  return any_set;
+}
diff --git a/compat/mimalloc/bitmap.h b/compat/mimalloc/bitmap.h
new file mode 100644
index 00000000000000..0f4744f4fc3ffd
--- /dev/null
+++ b/compat/mimalloc/bitmap.h
@@ -0,0 +1,119 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* ----------------------------------------------------------------------------
+Concurrent bitmap that can set/reset sequences of bits atomically,
+represented as an array of fields where each field is a machine word (`size_t`)
+
+There are two api's; the standard one cannot have sequences that cross
+between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
+(this is used in region allocation)
+
+The `_across` postfixed functions do allow sequences that can cross over
+between the fields. (This is used in arena allocation)
+---------------------------------------------------------------------------- */
+#pragma once
+#ifndef MI_BITMAP_H
+#define MI_BITMAP_H
+
+/* -----------------------------------------------------------
+ Bitmap definition
+----------------------------------------------------------- */
+
+#define MI_BITMAP_FIELD_BITS (8*MI_SIZE_SIZE)
+#define MI_BITMAP_FIELD_FULL (~((size_t)0)) // all bits set
+
+// An atomic bitmap of `size_t` fields
+typedef _Atomic(size_t) mi_bitmap_field_t;
+typedef mi_bitmap_field_t* mi_bitmap_t;
+
+// A bitmap index is the index of the bit in a bitmap.
+typedef size_t mi_bitmap_index_t;
+
+// Create a bit index from a field index and a bit offset; `bitidx` may
+// equal MI_BITMAP_FIELD_BITS (one-past-the-end of a field).
+static inline mi_bitmap_index_t mi_bitmap_index_create_ex(size_t idx, size_t bitidx) {
+  mi_assert_internal(bitidx <= MI_BITMAP_FIELD_BITS);
+  return (idx * MI_BITMAP_FIELD_BITS) + bitidx;
+}
+// Create a bit index from a field index and a strictly in-field bit offset.
+static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) {
+  mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS);
+  return mi_bitmap_index_create_ex(idx, bitidx);
+}
+
+// Create a bit index from an absolute bit number.
+static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
+  return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS,
+                                full_bitidx % MI_BITMAP_FIELD_BITS);
+}
+
+// Extract the field index from a bit index.
+static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
+  return bitmap_idx / MI_BITMAP_FIELD_BITS;
+}
+
+// Extract the bit offset within its field from a bit index.
+static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) {
+  return bitmap_idx % MI_BITMAP_FIELD_BITS;
+}
+
+// Return the absolute bit number (a bit index already is one).
+static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) {
+  return bitmap_idx;
+}
+
+/* -----------------------------------------------------------
+ Claim a bit sequence atomically
+----------------------------------------------------------- */
+
+// Try to atomically claim a sequence of `count` bits in a single
+// field at `idx` in `bitmap`. Returns `true` on success.
+bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
+
+// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
+// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
+bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
+
+// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
+typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
+bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 0 atomically
+// Returns `true` if all `count` bits were 1 previously.
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Returns `true` if successful when all previous `count` bits were 0.
+bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero);
+
+bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+
+//--------------------------------------------------------------------------
+// the `_across` functions work on bitmaps where sequences can cross over
+// between the fields. This is used in arena allocation
+//--------------------------------------------------------------------------
+
+// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
+// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 0 atomically
+// Returns `true` if all `count` bits were 1 previously.
+bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set);
+
+bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set);
+bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+#endif
diff --git a/compat/mimalloc/free.c b/compat/mimalloc/free.c
new file mode 100644
index 00000000000000..0129ce83bd6c06
--- /dev/null
+++ b/compat/mimalloc/free.c
@@ -0,0 +1,588 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#if !defined(MI_IN_ALLOC_C)
+#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)"
+// add includes help an IDE
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h" // _mi_prim_thread_id()
+#endif
+
+// forward declarations
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block);
+static bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block);
+static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block);
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block);
+
+
+// ------------------------------------------------------
+// Free
+// ------------------------------------------------------
+
+// forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
+static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block);
+
+// regular free of a (thread local) block pointer
+// fast path written carefully to prevent spilling on the stack
+static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full)
+{
+ // checks
+ if mi_unlikely(mi_check_is_double_free(page, block)) return;
+ mi_check_padding(page, block);
+ if (track_stats) { mi_stat_free(page, block); }
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED
+ if (!mi_page_is_huge(page)) { // huge page content may be already decommitted
+ memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+ }
+ #endif
+ if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned
+
+ // actual free: push on the local free list
+ mi_block_set_next(page, block, page->local_free);
+ page->local_free = block;
+ if mi_unlikely(--page->used == 0) {
+ _mi_page_retire(page);
+ }
+ else if mi_unlikely(check_full && mi_page_is_in_full(page)) {
+ _mi_page_unfull(page);
+ }
+}
+
+// Adjust a block that was allocated aligned, to the actual start of the block in the page.
+// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
+// `page_start` and `block_size` fields; however these are constant and the page won't be
+// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
+mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
+ mi_assert_internal(page!=NULL && p!=NULL);
+
+ size_t diff = (uint8_t*)p - page->page_start;
+ size_t adjust;
+ if mi_likely(page->block_size_shift != 0) {
+ adjust = diff & (((size_t)1 << page->block_size_shift) - 1);
+ }
+ else {
+ adjust = diff % mi_page_block_size(page);
+ }
+
+ return (mi_block_t*)((uintptr_t)p - adjust);
+}
+
+// forward declaration for a MI_GUARDED build
+#if MI_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p); // forward declaration
+static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+ if (mi_block_ptr_is_guarded(block, p)) { mi_block_unguard(page, block, p); }
+}
+#else
+static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+ MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(p);
+}
+#endif
+
+// free a local pointer (page parameter comes first for better codegen)
+static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
+ MI_UNUSED(segment);
+ mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
+ mi_block_check_unguard(page, block, p);
+ mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
+}
+
+// free a pointer owned by another thread (page parameter comes first for better codegen)
+static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
+ mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
+ mi_block_check_unguard(page, block, p);
+ mi_free_block_mt(page, segment, block);
+}
+
+// generic free (for runtime integration)
+void mi_decl_noinline _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
+ if (is_local) mi_free_generic_local(page,segment,p);
+ else mi_free_generic_mt(page,segment,p);
+}
+
+// Get the segment data belonging to a pointer
+// This is just a single `and` in release mode but does further checks in debug mode
+// (and secure mode) to see if this was a valid pointer.
+static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
+{
+ MI_UNUSED(msg);
+
+ #if (MI_DEBUG>0)
+ if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0 && !mi_option_is_enabled(mi_option_guarded_precise)) {
+ _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
+ return NULL;
+ }
+ #endif
+
+ mi_segment_t* const segment = _mi_ptr_segment(p);
+ if mi_unlikely(segment==NULL) return segment;
+
+ #if (MI_DEBUG>0)
+ if mi_unlikely(!mi_is_in_heap_region(p)) {
+ #if (MI_INTPTR_SIZE == 8 && defined(__linux__))
+ if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640)
+ #else
+ {
+ #endif
+ _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
+ "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
+ if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
+ _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
+ }
+ }
+ }
+ #endif
+ #if (MI_DEBUG>0 || MI_SECURE>=4)
+ if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
+ _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
+ return NULL;
+ }
+ #endif
+
+ return segment;
+}
+
+// Free a block
+// Fast path written carefully to prevent register spilling on the stack
+static inline void mi_free_ex(void* p, size_t* usable) mi_attr_noexcept
+{
+ mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
+ if mi_unlikely(segment==NULL) return;
+
+ const bool is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
+ mi_page_t* const page = _mi_segment_page_of(segment, p);
+ if (usable!=NULL) { *usable = mi_page_usable_block_size(page); }
+
+ if mi_likely(is_local) { // thread-local free?
+ if mi_likely(page->flags.full_aligned == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
+ // thread-local, aligned, and not a full page
+ mi_block_t* const block = (mi_block_t*)p;
+ mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */);
+ }
+ else {
+ // page is full or contains (inner) aligned blocks; use generic path
+ mi_free_generic_local(page, segment, p);
+ }
+ }
+ else {
+ // not thread-local; use generic path
+ mi_free_generic_mt(page, segment, p);
+ }
+}
+
+void mi_free(void* p) mi_attr_noexcept {
+ mi_free_ex(p,NULL);
+}
+
+void mi_ufree(void* p, size_t* usable) mi_attr_noexcept {
+ mi_free_ex(p,usable);
+}
+
+// return true if successful
+bool _mi_free_delayed_block(mi_block_t* block) {
+ // get segment and page
+ mi_assert_internal(block!=NULL);
+ const mi_segment_t* const segment = _mi_ptr_segment(block);
+ mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
+ mi_assert_internal(_mi_thread_id() == segment->thread_id);
+ mi_page_t* const page = _mi_segment_page_of(segment, block);
+
+ // Clear the no-delayed flag so delayed freeing is used again for this page.
+ // This must be done before collecting the free lists on this page -- otherwise
+ // some blocks may end up in the page `thread_free` list with no blocks in the
+ // heap `thread_delayed_free` list which may cause the page to be never freed!
+ // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
+ if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */)) {
+ return false;
+ }
+
+ // collect all other non-local frees (move from `thread_free` to `free`) to ensure up-to-date `used` count
+ _mi_page_free_collect(page, false);
+
+ // and free the block (possibly freeing the page as well since `used` is updated)
+ mi_free_block_local(page, block, false /* stats have already been adjusted */, true /* check for a full page */);
+ return true;
+}
+
+// ------------------------------------------------------
+// Multi-threaded Free (`_mt`)
+// ------------------------------------------------------
+
+// Push a block that is owned by another thread on its page-local thread free
+// list or it's heap delayed free list. Such blocks are later collected by
+// the owning thread in `_mi_free_delayed_block`.
+static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
+{
+ // Try to put the block on either the page-local thread free list,
+ // or the heap delayed free list (if this is the first non-local free in that page)
+ mi_thread_free_t tfreex;
+ bool use_delayed;
+ mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
+ do {
+ use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
+ if mi_unlikely(use_delayed) {
+ // unlikely: this only happens on the first concurrent free in a page that is in the full list
+ tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
+ }
+ else {
+ // usual: directly add to page thread_free list
+ mi_block_set_next(page, block, mi_tf_block(tfree));
+ tfreex = mi_tf_set_block(tfree,block);
+ }
+ } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+
+ // If this was the first non-local free, we need to push it on the heap delayed free list instead
+ if mi_unlikely(use_delayed) {
+ // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
+ mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
+ mi_assert_internal(heap != NULL);
+ if (heap != NULL) {
+ // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
+ mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
+ do {
+ mi_block_set_nextx(heap,block,dfree, heap->keys);
+ } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
+ }
+
+ // and reset the MI_DELAYED_FREEING flag
+ tfree = mi_atomic_load_relaxed(&page->xthread_free);
+ do {
+ tfreex = tfree;
+ mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
+ tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
+ } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+ }
+}
+
+// Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
+static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block)
+{
+ // first see if the segment was abandoned and if we can reclaim it into our thread
+ if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 &&
+ #if MI_HUGE_PAGE_ABANDON
+ segment->page_kind != MI_PAGE_HUGE &&
+ #endif
+ mi_atomic_load_relaxed(&segment->thread_id) == 0 && // segment is abandoned?
+ mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initalized (issue #944))
+ {
+ // the segment is abandoned, try to reclaim it into our heap
+ if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) {
+ mi_assert_internal(_mi_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
+ mi_assert_internal(mi_heap_get_default()->tld->segments.subproc == segment->subproc);
+ mi_free(block); // recursively free as now it will be a local free in our heap
+ return;
+ }
+ }
+
+ // The padding check may access the non-thread-owned page for the key values.
+ // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
+ mi_check_padding(page, block);
+
+ // adjust stats (after padding check and potentially recursive `mi_free` above)
+ mi_stat_free(page, block); // stat_free may access the padding
+ mi_track_free_size(block, mi_page_usable_size_of(page,block));
+
+ // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
+ _mi_padding_shrink(page, block, sizeof(mi_block_t));
+
+ if (segment->kind == MI_SEGMENT_HUGE) {
+ #if MI_HUGE_PAGE_ABANDON
+ // huge page segments are always abandoned and can be freed immediately
+ _mi_segment_huge_page_free(segment, page, block);
+ return;
+ #else
+ // huge pages are special as they occupy the entire segment
+ // as these are large we reset the memory occupied by the page so it is available to other threads
+ // (as the owning thread needs to actually free the memory later).
+ _mi_segment_huge_page_reset(segment, page, block);
+ #endif
+ }
+ else {
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
+ memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+ #endif
+ }
+
+ // and finally free the actual block by pushing it on the owning heap
+ // thread_delayed free list (or heap delayed free list)
+ mi_free_block_delayed_mt(page,block);
+}
+
+
+// ------------------------------------------------------
+// Usable size
+// ------------------------------------------------------
+
+// Bytes available in a block
+static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept {
+ const mi_block_t* block = _mi_page_ptr_unalign(page, p);
+ const size_t size = mi_page_usable_size_of(page, block);
+ const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
+ mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
+ const size_t aligned_size = (size - adjust);
+ #if MI_GUARDED
+ if (mi_block_ptr_is_guarded(block, p)) {
+ return aligned_size - _mi_os_page_size();
+ }
+ #endif
+ return aligned_size;
+}
+
+static inline mi_page_t* mi_validate_ptr_page(const void* p, const char* msg) {
+ const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
+ if mi_unlikely(segment==NULL) return NULL;
+ mi_page_t* const page = _mi_segment_page_of(segment, p);
+ return page;
+}
+
+static inline size_t _mi_usable_size(const void* p, const mi_page_t* page) mi_attr_noexcept {
+ if mi_unlikely(page==NULL) return 0;
+ if mi_likely(!mi_page_has_aligned(page)) {
+ const mi_block_t* block = (const mi_block_t*)p;
+ return mi_page_usable_size_of(page, block);
+ }
+ else {
+ // split out to separate routine for improved code generation
+ return mi_page_usable_aligned_size_of(page, p);
+ }
+}
+
+mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
+ const mi_page_t* const page = mi_validate_ptr_page(p,"mi_usable_size");
+ return _mi_usable_size(p,page);
+}
+
+
+// ------------------------------------------------------
+// Free variants
+// ------------------------------------------------------
+
+void mi_free_size(void* p, size_t size) mi_attr_noexcept {
+ MI_UNUSED_RELEASE(size);
+ #if MI_DEBUG
+ const mi_page_t* const page = mi_validate_ptr_page(p,"mi_free_size");
+ const size_t available = _mi_usable_size(p,page);
+ mi_assert(p == NULL || size <= available || available == 0 /* invalid pointer */ );
+ #endif
+ mi_free(p);
+}
+
+void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
+ MI_UNUSED_RELEASE(alignment);
+ mi_assert(((uintptr_t)p % alignment) == 0);
+ mi_free_size(p,size);
+}
+
+void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
+ MI_UNUSED_RELEASE(alignment);
+ mi_assert(((uintptr_t)p % alignment) == 0);
+ mi_free(p);
+}
+
+
+// ------------------------------------------------------
+// Check for double free in secure and debug mode
+// This is somewhat expensive so only enabled for secure mode 4
+// ------------------------------------------------------
+
+#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
+// linear check if the free list contains a specific element
+static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
+ while (list != NULL) {
+ if (elem==list) return true;
+ list = mi_block_next(page, list);
+ }
+ return false;
+}
+
+static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
+ // The decoded value is in the same page (or NULL).
+ // Walk the free lists to verify positively if it is already freed
+ if (mi_list_contains(page, page->free, block) ||
+ mi_list_contains(page, page->local_free, block) ||
+ mi_list_contains(page, mi_page_thread_free(page), block))
+ {
+ _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
+ return true;
+ }
+ return false;
+}
+
+#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
+
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+ bool is_double_free = false;
+ mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
+ if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
+ (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
+ {
+ // Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free?
+ // (continue in separate function to improve code generation)
+ is_double_free = mi_check_is_double_freex(page, block);
+ }
+ return is_double_free;
+}
+#else
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(page);
+ MI_UNUSED(block);
+ return false;
+}
+#endif
+
+
+// ---------------------------------------------------------------------------
+// Check for heap block overflow by setting up padding at the end of the block
+// ---------------------------------------------------------------------------
+
+#if MI_PADDING // && !MI_TRACK_ENABLED
+static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
+ *bsize = mi_page_usable_block_size(page);
+ const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
+ mi_track_mem_defined(padding,sizeof(mi_padding_t));
+ *delta = padding->delta;
+ uint32_t canary = padding->canary;
+ uintptr_t keys[2];
+ keys[0] = page->keys[0];
+ keys[1] = page->keys[1];
+ bool ok = (mi_ptr_encode_canary(page,block,keys) == canary && *delta <= *bsize);
+ mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+ return ok;
+}
+
+// Return the exact usable size of a block.
+static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
+ size_t bsize;
+ size_t delta;
+ bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+ mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
+ return (ok ? bsize - delta : 0);
+}
+
+// When a non-thread-local block is freed, it becomes part of the thread delayed free
+// list that is freed later by the owning heap. If the exact usable size is too small to
+// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
+// so it will later not trigger an overflow error in `mi_free_block`.
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+ size_t bsize;
+ size_t delta;
+ bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+ mi_assert_internal(ok);
+ if (!ok || (bsize - delta) >= min_size) return; // usually already enough space
+ mi_assert_internal(bsize >= min_size);
+ if (bsize < min_size) return; // should never happen
+ size_t new_delta = (bsize - min_size);
+ mi_assert_internal(new_delta < bsize);
+ mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
+ mi_track_mem_defined(padding,sizeof(mi_padding_t));
+ padding->delta = (uint32_t)new_delta;
+ mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+}
+#else
+static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(block);
+ return mi_page_usable_block_size(page);
+}
+
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+ MI_UNUSED(page);
+ MI_UNUSED(block);
+ MI_UNUSED(min_size);
+}
+#endif
+
+#if MI_PADDING && MI_PADDING_CHECK
+
+static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
+ size_t bsize;
+ size_t delta;
+ bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+ *size = *wrong = bsize;
+ if (!ok) return false;
+ mi_assert_internal(bsize >= delta);
+ *size = bsize - delta;
+ if (!mi_page_is_huge(page)) {
+ uint8_t* fill = (uint8_t*)block + bsize - delta;
+ const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
+ mi_track_mem_defined(fill, maxpad);
+ for (size_t i = 0; i < maxpad; i++) {
+ if (fill[i] != MI_DEBUG_PADDING) {
+ *wrong = bsize - delta + i;
+ ok = false;
+ break;
+ }
+ }
+ mi_track_mem_noaccess(fill, maxpad);
+ }
+ return ok;
+}
+
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+ size_t size;
+ size_t wrong;
+ if (!mi_verify_padding(page,block,&size,&wrong)) {
+ _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
+ }
+}
+
+#else
+
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(page);
+ MI_UNUSED(block);
+}
+
+#endif
+
+// only maintain stats for smaller objects if requested
+#if (MI_STAT>0)
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(block);
+ mi_heap_t* const heap = mi_heap_get_default();
+ const size_t bsize = mi_page_usable_block_size(page);
+ // #if (MI_STAT>1)
+ // const size_t usize = mi_page_usable_size_of(page, block);
+ // mi_heap_stat_decrease(heap, malloc_requested, usize);
+ // #endif
+ if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, malloc_normal, bsize);
+ #if (MI_STAT > 1)
+ mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], 1);
+ #endif
+ }
+ //else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ // mi_heap_stat_decrease(heap, malloc_large, bsize);
+ //}
+ else {
+ mi_heap_stat_decrease(heap, malloc_huge, bsize);
+ }
+}
+#else
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(page); MI_UNUSED(block);
+}
+#endif
+
+
+// Remove guard page when building with MI_GUARDED
+#if MI_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+ MI_UNUSED(p);
+ mi_assert_internal(mi_block_ptr_is_guarded(block, p));
+ mi_assert_internal(mi_page_has_aligned(page));
+ mi_assert_internal((uint8_t*)p - (uint8_t*)block >= (ptrdiff_t)sizeof(mi_block_t));
+ mi_assert_internal(block->next == MI_BLOCK_TAG_GUARDED);
+
+ const size_t bsize = mi_page_block_size(page);
+ const size_t psize = _mi_os_page_size();
+ mi_assert_internal(bsize > psize);
+ mi_assert_internal(_mi_page_segment(page)->allow_decommit);
+ void* gpage = (uint8_t*)block + bsize - psize;
+ mi_assert_internal(_mi_is_aligned(gpage, psize));
+ _mi_os_unprotect(gpage, psize);
+}
+#endif
diff --git a/compat/mimalloc/heap.c b/compat/mimalloc/heap.c
new file mode 100644
index 00000000000000..88969311e89586
--- /dev/null
+++ b/compat/mimalloc/heap.c
@@ -0,0 +1,737 @@
+/*----------------------------------------------------------------------------
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // mi_prim_get_default_heap
+
+#include // memset, memcpy
+
+#if defined(_MSC_VER) && (_MSC_VER < 1920)
+#pragma warning(disable:4204) // non-constant aggregate initializer
+#endif
+
+/* -----------------------------------------------------------
+ Helpers
+----------------------------------------------------------- */
+
+// return `true` if ok, `false` to break
+typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);
+
+// Visit all pages in a heap; returns `false` if break was called.
+static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
+{
+ if (heap==NULL || heap->page_count==0) return 0;
+
+ // visit all pages
+ #if MI_DEBUG>1
+ size_t total = heap->page_count;
+ size_t count = 0;
+ #endif
+
+ for (size_t i = 0; i <= MI_BIN_FULL; i++) {
+ mi_page_queue_t* pq = &heap->pages[i];
+ mi_page_t* page = pq->first;
+ while(page != NULL) {
+ mi_page_t* next = page->next; // save next in case the page gets removed from the queue
+ mi_assert_internal(mi_page_heap(page) == heap);
+ #if MI_DEBUG>1
+ count++;
+ #endif
+ if (!fn(heap, pq, page, arg1, arg2)) return false;
+ page = next; // and continue
+ }
+ }
+ mi_assert_internal(count == total);
+ return true;
+}
+
+
+#if MI_DEBUG>=2
+static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
+ MI_UNUSED(arg1);
+ MI_UNUSED(arg2);
+ MI_UNUSED(pq);
+ mi_assert_internal(mi_page_heap(page) == heap);
+ mi_segment_t* segment = _mi_page_segment(page);
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == heap->thread_id);
+ mi_assert_expensive(_mi_page_is_valid(page));
+ return true;
+}
+#endif
+#if MI_DEBUG>=3
+static bool mi_heap_is_valid(mi_heap_t* heap) {
+ mi_assert_internal(heap!=NULL);
+ mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
+ return true;
+}
+#endif
+
+
+
+
+/* -----------------------------------------------------------
+ "Collect" pages by migrating `local_free` and `thread_free`
+ lists and freeing empty pages. This is done when a thread
+ stops (and in that case abandons pages if there are still
+ blocks alive)
+----------------------------------------------------------- */
+
+typedef enum mi_collect_e {
+ MI_NORMAL,
+ MI_FORCE,
+ MI_ABANDON
+} mi_collect_t;
+
+
+static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
+ MI_UNUSED(arg2);
+ MI_UNUSED(heap);
+ mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
+ mi_collect_t collect = *((mi_collect_t*)arg_collect);
+ _mi_page_free_collect(page, collect >= MI_FORCE);
+ if (collect == MI_FORCE) {
+ // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
+ mi_segment_t* segment = _mi_page_segment(page);
+ _mi_segment_collect(segment, true /* force? */);
+ }
+ if (mi_page_all_free(page)) {
+ // no more used blocks, free the page.
+ // note: this will free retired pages as well.
+ _mi_page_free(page, pq, collect >= MI_FORCE);
+ }
+ else if (collect == MI_ABANDON) {
+ // still used blocks but the thread is done; abandon the page
+ _mi_page_abandon(page, pq);
+ }
+ return true; // don't break
+}
+
+static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
+ MI_UNUSED(arg1);
+ MI_UNUSED(arg2);
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
+ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+ return true; // don't break
+}
+
+static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
+{
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+
+ const bool force = (collect >= MI_FORCE);
+ _mi_deferred_free(heap, force);
+
+ // python/cpython#112532: we may be called from a thread that is not the owner of the heap
+ const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());
+
+ // note: never reclaim on collect but leave it to threads that need storage to reclaim
+ const bool force_main =
+ #ifdef NDEBUG
+ collect == MI_FORCE
+ #else
+ collect >= MI_FORCE
+ #endif
+ && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;
+
+ if (force_main) {
+ // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
+ // if all memory is freed by now, all segments should be freed.
+ // note: this only collects in the current subprocess
+ _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
+ }
+
+ // if abandoning, mark all pages to no longer add to delayed_free
+ if (collect == MI_ABANDON) {
+ mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
+ }
+
+ // free all current thread delayed blocks.
+ // (if abandoning, after this there are no more thread-delayed references into the pages.)
+ _mi_heap_delayed_free_all(heap);
+
+ // collect retired pages
+ _mi_heap_collect_retired(heap, force);
+
+ // collect all pages owned by this thread
+ mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
+ mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
+
+ // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
+ // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+ _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);
+
+ // if forced, collect thread data cache on program-exit (or shared library unload)
+ if (force && is_main_thread && mi_heap_is_backing(heap)) {
+ _mi_thread_data_collect(); // collect thread data cache
+ }
+
+ // collect arenas (this is program wide so don't force purges on abandonment of threads)
+ _mi_arenas_collect(collect == MI_FORCE /* force purge? */);
+
+ // merge statistics
+ if (collect <= MI_FORCE) { _mi_stats_merge_thread(heap->tld); }
+}
+
+void _mi_heap_collect_abandon(mi_heap_t* heap) {
+ mi_heap_collect_ex(heap, MI_ABANDON);
+}
+
+void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
+ mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
+}
+
+void mi_collect(bool force) mi_attr_noexcept {
+ mi_heap_collect(mi_prim_get_default_heap(), force);
+}
+
+
+/* -----------------------------------------------------------
+ Heap new
+----------------------------------------------------------- */
+
+mi_heap_t* mi_heap_get_default(void) {
+ mi_thread_init();
+ return mi_prim_get_default_heap();
+}
+
+static bool mi_heap_is_default(const mi_heap_t* heap) {
+ return (heap == mi_prim_get_default_heap());
+}
+
+
+mi_heap_t* mi_heap_get_backing(void) {
+ mi_heap_t* heap = mi_heap_get_default();
+ mi_assert_internal(heap!=NULL);
+ mi_heap_t* bheap = heap->tld->heap_backing;
+ mi_assert_internal(bheap!=NULL);
+ mi_assert_internal(bheap->thread_id == _mi_thread_id());
+ return bheap;
+}
+
+// Initialize `heap` from the static empty-heap template and link it into
+// the thread-local heaps list of `tld`.
+// `noreclaim`: do not reclaim abandoned pages (needed for destroyable heaps).
+// `tag`: heap tag so same-tagged heaps store the same kind of objects.
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag) {
+ _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
+ heap->tld = tld;
+ heap->thread_id = _mi_thread_id();
+ heap->arena_id = arena_id;
+ heap->no_reclaim = noreclaim;
+ heap->tag = tag;
+ if (heap == tld->heap_backing) {
+ // the backing heap seeds its own random state ...
+ #if defined(_WIN32) && !defined(MI_SHARED_LIB)
+ _mi_random_init_weak(&heap->random); // prevent allocation failure during bcrypt dll initialization with static linking (issue #1185)
+ #else
+ _mi_random_init(&heap->random);
+ #endif
+ }
+ else {
+ // ... while further heaps split from the backing heap's random state
+ _mi_random_split(&tld->heap_backing->random, &heap->random);
+ }
+ heap->cookie = _mi_heap_random_next(heap) | 1; // `| 1` guarantees a non-zero cookie
+ heap->keys[0] = _mi_heap_random_next(heap);
+ heap->keys[1] = _mi_heap_random_next(heap);
+ _mi_heap_guarded_init(heap);
+ // push on the thread local heaps list
+ heap->next = heap->tld->heaps;
+ heap->tld->heaps = heap;
+}
+
+// Create a new heap on the current thread (allocated from the backing heap).
+// Returns NULL on allocation failure. `heap_tag` must fit in a uint8_t.
+mi_decl_nodiscard mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id) {
+ mi_heap_t* bheap = mi_heap_get_backing();
+ mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
+ if (heap == NULL) return NULL;
+ mi_assert(heap_tag >= 0 && heap_tag < 256); // must fit the uint8_t tag field
+ _mi_heap_init(heap, bheap->tld, arena_id, allow_destroy /* no reclaim? */, (uint8_t)heap_tag /* heap tag */);
+ return heap;
+}
+
+// Create a new heap that only allocates in the given arena.
+mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
+ return mi_heap_new_ex(0 /* default heap tag */, false /* don't allow `mi_heap_destroy` */, arena_id);
+}
+
+// Create a new heap that can be destroyed with `mi_heap_destroy`.
+mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
+ // don't reclaim abandoned memory, as otherwise `mi_heap_destroy` would be unsafe
+ return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none());
+}
+
+// Can memory with the given `memid` be used by `heap` (matches its arena preference)?
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
+ return _mi_arena_memid_is_suitable(memid, heap->arena_id);
+}
+
+// Draw the next value from the heap's random state.
+uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
+ return _mi_random_next(&heap->random);
+}
+
+// zero out the page queues
+static void mi_heap_reset_pages(mi_heap_t* heap) {
+ mi_assert_internal(heap != NULL);
+ mi_assert_internal(mi_heap_is_initialized(heap));
+ // TODO: copy full empty heap instead?
+ memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
+ _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
+ heap->thread_delayed_free = NULL;
+ heap->page_count = 0;
+}
+
+// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
+static void mi_heap_free(mi_heap_t* heap) {
+ mi_assert(heap != NULL);
+ mi_assert_internal(mi_heap_is_initialized(heap));
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+ if (mi_heap_is_backing(heap)) return; // dont free the backing heap
+
+ // reset default
+ if (mi_heap_is_default(heap)) {
+ _mi_heap_set_default_direct(heap->tld->heap_backing);
+ }
+
+ // remove ourselves from the thread local heaps list
+ // linear search but we expect the number of heaps to be relatively small
+ mi_heap_t* prev = NULL;
+ mi_heap_t* curr = heap->tld->heaps;
+ while (curr != heap && curr != NULL) {
+ prev = curr;
+ curr = curr->next;
+ }
+ mi_assert_internal(curr == heap);
+ if (curr == heap) {
+ if (prev != NULL) { prev->next = heap->next; }
+ else { heap->tld->heaps = heap->next; }
+ }
+ mi_assert_internal(heap->tld->heaps != NULL);
+
+ // and free the used memory
+ mi_free(heap);
+}
+
+// return a heap on the same thread as `heap` specialized for the specified tag (if it exists)
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
+ if (heap->tag == tag) {
+ return heap;
+ }
+ for (mi_heap_t *curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
+ if (curr->tag == tag) {
+ return curr;
+ }
+ }
+ return NULL;
+}
+
+/* -----------------------------------------------------------
+ Heap destroy
+----------------------------------------------------------- */
+
+// Page visitor for `mi_heap_destroy`: forcefully free a page, even if it
+// still contains live blocks. Adjusts statistics, pretends all blocks are
+// free, and returns the page's memory to the segment. Always returns true
+// so the visit continues over all pages.
+static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
+ MI_UNUSED(arg1);
+ MI_UNUSED(arg2);
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
+
+ // ensure no more thread_delayed_free will be added
+ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+
+ // stats
+ const size_t bsize = mi_page_block_size(page);
+ if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
+ //if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ // mi_heap_stat_decrease(heap, malloc_large, bsize);
+ //}
+ //else
+ {
+ mi_heap_stat_decrease(heap, malloc_huge, bsize);
+ }
+ }
+ #if (MI_STAT>0)
+ _mi_page_free_collect(page, false); // update used count
+ const size_t inuse = page->used;
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse);
+ #if (MI_STAT>1)
+ mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse);
+ #endif
+ }
+ // mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks...
+ #endif
+
+ // pretend it is all free now
+ mi_assert_internal(mi_page_thread_free(page) == NULL);
+ page->used = 0;
+
+ // and free the page
+ // mi_page_free(page,false);
+ page->next = NULL;
+ page->prev = NULL;
+ _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);
+
+ return true; // keep going
+}
+
+// Forcefully free all pages of `heap` (even pages with live blocks)
+// and reset its page queues.
+void _mi_heap_destroy_pages(mi_heap_t* heap) {
+ mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
+ mi_heap_reset_pages(heap);
+}
+
+#if MI_TRACK_HEAP_DESTROY
+// Block visitor that notifies the memory tracker that `block` is freed
+// (used to mark all blocks freed before a heap is destroyed).
+static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
+ MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
+ mi_track_free_size(block,mi_usable_size(block));
+ return true; // keep visiting
+}
+#endif
+
+// Destroy a heap, freeing all its pages even if blocks are still live.
+// Only valid for heaps created with `allow_destroy` (no_reclaim); otherwise
+// falls back to the safe `mi_heap_delete`.
+void mi_heap_destroy(mi_heap_t* heap) {
+ mi_assert(heap != NULL);
+ mi_assert(mi_heap_is_initialized(heap));
+ mi_assert(heap->no_reclaim);
+ mi_assert_expensive(mi_heap_is_valid(heap));
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+ #if MI_GUARDED
+ // with guarded objects a forced destroy is not supported; delete safely instead
+ // _mi_warning_message("'mi_heap_destroy' called but MI_GUARDED is enabled -- using `mi_heap_delete` instead (heap at %p)\n", heap);
+ mi_heap_delete(heap);
+ return;
+ #else
+ if (!heap->no_reclaim) {
+ _mi_warning_message("'mi_heap_destroy' called but ignored as the heap was not created with 'allow_destroy' (heap at %p)\n", heap);
+ // don't free in case it may contain reclaimed pages
+ mi_heap_delete(heap);
+ }
+ else {
+ // track all blocks as freed
+ #if MI_TRACK_HEAP_DESTROY
+ mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
+ #endif
+ // free all pages
+ _mi_heap_destroy_pages(heap);
+ mi_heap_free(heap);
+ }
+ #endif
+}
+
+// forcefully destroy all heaps in the current thread;
+// heaps that may contain reclaimed pages only have their pages destroyed
+// (the heap struct itself is not freed in that case).
+void _mi_heap_unsafe_destroy_all(mi_heap_t* heap) {
+ mi_assert_internal(heap != NULL);
+ if (heap == NULL) return;
+ mi_heap_t* curr = heap->tld->heaps;
+ while (curr != NULL) {
+ mi_heap_t* next = curr->next; // save `next` as `curr` may be freed
+ if (curr->no_reclaim) {
+ mi_heap_destroy(curr);
+ }
+ else {
+ _mi_heap_destroy_pages(curr);
+ }
+ curr = next;
+ }
+}
+
+/* -----------------------------------------------------------
+ Safe Heap delete
+----------------------------------------------------------- */
+
+// Transfer the pages from one heap to the other, leaving `from` empty.
+// Used by `mi_heap_delete` to hand still-used pages to the backing heap.
+static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
+ mi_assert_internal(heap!=NULL);
+ if (from==NULL || from->page_count == 0) return;
+
+ // reduce the size of the delayed frees
+ _mi_heap_delayed_free_partial(from);
+
+ // transfer all pages by appending the queues; this will set a new heap field
+ // so threads may do delayed frees in either heap for a while.
+ // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
+ // so after this only the new heap will get delayed frees
+ for (size_t i = 0; i <= MI_BIN_FULL; i++) {
+ mi_page_queue_t* pq = &heap->pages[i];
+ mi_page_queue_t* append = &from->pages[i];
+ size_t pcount = _mi_page_queue_append(heap, pq, append);
+ heap->page_count += pcount;
+ from->page_count -= pcount;
+ }
+ mi_assert_internal(from->page_count == 0);
+
+ // and do outstanding delayed frees in the `from` heap
+ // note: be careful here as the `heap` field in all those pages no longer point to `from`,
+ // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
+ // the regular `_mi_free_delayed_block` which is safe.
+ _mi_heap_delayed_free_all(from);
+ #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
+ mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
+ #endif
+
+ // and reset the `from` heap
+ mi_heap_reset_pages(from);
+}
+
+// are two heaps compatible with respect to heap-tag, exclusive arena etc.
+// (pages can only be transferred between compatible heaps)
+static bool mi_heaps_are_compatible(mi_heap_t* heap1, mi_heap_t* heap2) {
+ return (heap1->tag == heap2->tag && // store same kind of objects
+ heap1->arena_id == heap2->arena_id); // same arena preference
+}
+
+// Safe delete a heap without freeing any still allocated blocks in that heap:
+// live pages are either absorbed by the backing heap or abandoned.
+void mi_heap_delete(mi_heap_t* heap)
+{
+ mi_assert(heap != NULL);
+ mi_assert(mi_heap_is_initialized(heap));
+ mi_assert_expensive(mi_heap_is_valid(heap));
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+
+ mi_heap_t* bheap = heap->tld->heap_backing;
+ if (bheap != heap && mi_heaps_are_compatible(bheap,heap)) {
+ // transfer still used pages to the backing heap
+ mi_heap_absorb(bheap, heap);
+ }
+ else {
+ // the backing heap abandons its pages
+ _mi_heap_collect_abandon(heap);
+ }
+ mi_assert_internal(heap->page_count==0);
+ mi_heap_free(heap);
+}
+
+// Make `heap` the default heap of the current thread;
+// returns the previous default heap (or NULL when `heap` is invalid).
+mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
+ mi_assert(heap != NULL);
+ mi_assert(mi_heap_is_initialized(heap));
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
+ mi_assert_expensive(mi_heap_is_valid(heap));
+ mi_heap_t* old = mi_prim_get_default_heap();
+ _mi_heap_set_default_direct(heap);
+ return old;
+}
+
+
+
+
+/* -----------------------------------------------------------
+ Analysis
+----------------------------------------------------------- */
+
+// static since it is not thread safe to access heaps from other threads.
+// Returns the heap owning block `p`, or NULL when `p` is NULL or the
+// segment cookie check fails (i.e. `p` does not look like a mimalloc block).
+static mi_heap_t* mi_heap_of_block(const void* p) {
+ if (p == NULL) return NULL;
+ mi_segment_t* segment = _mi_ptr_segment(p);
+ bool valid = (_mi_ptr_cookie(segment) == segment->cookie); // cookie guards against invalid pointers
+ mi_assert_internal(valid);
+ if mi_unlikely(!valid) return NULL;
+ return mi_page_heap(_mi_segment_page_of(segment,p));
+}
+
+// Does block `p` belong to `heap`?
+bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
+ mi_assert(heap != NULL);
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
+ return (heap == mi_heap_of_block(p));
+}
+
+
+// Page visitor for `mi_heap_check_owned`: sets `*vfound` when `p` lies
+// within the page's block area; returns false (stop visiting) once found.
+static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
+ bool* found = (bool*)vfound;
+ void* start = mi_page_start(page);
+ void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
+ *found = (p >= start && p < end);
+ return (!*found); // continue if not found
+}
+
+// Is pointer `p` owned by `heap`? Visits all pages of the heap;
+// only pointer-aligned addresses are considered.
+bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
+ mi_assert(heap != NULL);
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
+ if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers
+ bool found = false;
+ mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
+ return found;
+}
+
+// Is pointer `p` owned by the current thread's default heap?
+bool mi_check_owned(const void* p) {
+ return mi_heap_check_owned(mi_prim_get_default_heap(), p);
+}
+
+/* -----------------------------------------------------------
+ Visit all heap blocks and areas
+ Todo: enable visiting abandoned pages, and
+ enable visiting all blocks of all heaps across threads
+----------------------------------------------------------- */
+
+// Fill in a public `mi_heap_area_t` descriptor from an internal page.
+void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
+ const size_t bsize = mi_page_block_size(page);
+ const size_t ubsize = mi_page_usable_block_size(page); // block size without padding
+ area->reserved = page->reserved * bsize;
+ area->committed = page->capacity * bsize;
+ area->blocks = mi_page_start(page);
+ area->used = page->used; // number of blocks in use (#553)
+ area->block_size = ubsize;
+ area->full_block_size = bsize;
+ area->heap_tag = page->heap_tag;
+}
+
+
+// Precompute a multiply-and-shift pair (`magic`, `shift`) so that
+// `n / divisor` can be computed without a division (see `mi_fast_divide`);
+// the divisor must fit in 32 bits.
+static void mi_get_fast_divisor(size_t divisor, uint64_t* magic, size_t* shift) {
+ mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
+ *shift = MI_SIZE_BITS - mi_clz(divisor - 1); // ceil(log2(divisor))
+ *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1);
+}
+
+// Divide `n` (must fit in 32 bits) by the divisor encoded in
+// (`magic`, `shift`) as produced by `mi_get_fast_divisor`.
+static size_t mi_fast_divide(size_t n, uint64_t magic, size_t shift) {
+ mi_assert_internal(n <= UINT32_MAX);
+ const uint64_t hi = ((uint64_t)n * magic) >> 32;
+ return (size_t)((hi + n) >> shift);
+}
+
+// Visit every live (in-use) block in `page`, calling `visitor` for each.
+// Returns false as soon as a visitor returns false, true otherwise.
+// Builds a bitmap of free blocks first so live blocks can be enumerated
+// without walking the free list per block.
+bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg) {
+ mi_assert(area != NULL);
+ if (area==NULL) return true;
+ mi_assert(page != NULL);
+ if (page == NULL) return true;
+
+ _mi_page_free_collect(page,true); // collect both thread_delayed and local_free
+ mi_assert_internal(page->local_free == NULL);
+ if (page->used == 0) return true; // nothing live to visit
+
+ size_t psize;
+ uint8_t* const pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
+ mi_heap_t* const heap = mi_page_heap(page);
+ const size_t bsize = mi_page_block_size(page);
+ const size_t ubsize = mi_page_usable_block_size(page); // without padding
+
+ // optimize page with one block
+ if (page->capacity == 1) {
+ mi_assert_internal(page->used == 1 && page->free == NULL);
+ return visitor(mi_page_heap(page), area, pstart, ubsize, arg);
+ }
+ mi_assert(bsize <= UINT32_MAX); // required by the fast-divide below
+
+ // optimize full pages: every block is live, no bitmap needed
+ if (page->used == page->capacity) {
+ uint8_t* block = pstart;
+ for (size_t i = 0; i < page->capacity; i++) {
+ if (!visitor(heap, area, block, ubsize, arg)) return false;
+ block += bsize;
+ }
+ return true;
+ }
+
+ // create a bitmap of free blocks.
+ #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
+ uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
+ const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS);
+ memset(free_map, 0, bmapsize * sizeof(intptr_t));
+ if (page->capacity % MI_INTPTR_BITS != 0) {
+ // mark left-over bits at the end as free (they are past the capacity)
+ size_t shift = (page->capacity % MI_INTPTR_BITS);
+ uintptr_t mask = (UINTPTR_MAX << shift);
+ free_map[bmapsize - 1] = mask;
+ }
+
+ // fast repeated division by the block size
+ uint64_t magic;
+ size_t shift;
+ mi_get_fast_divisor(bsize, &magic, &shift);
+
+ // walk the free list once, marking each free block in the bitmap
+ #if MI_DEBUG>1
+ size_t free_count = 0;
+ #endif
+ for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
+ #if MI_DEBUG>1
+ free_count++;
+ #endif
+ mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
+ size_t offset = (uint8_t*)block - pstart;
+ mi_assert_internal(offset % bsize == 0);
+ mi_assert_internal(offset <= UINT32_MAX);
+ size_t blockidx = mi_fast_divide(offset, magic, shift);
+ mi_assert_internal(blockidx == offset / bsize);
+ mi_assert_internal(blockidx < MI_MAX_BLOCKS);
+ size_t bitidx = (blockidx / MI_INTPTR_BITS);
+ size_t bit = blockidx - (bitidx * MI_INTPTR_BITS);
+ free_map[bitidx] |= ((uintptr_t)1 << bit);
+ }
+ mi_assert_internal(page->capacity == (free_count + page->used));
+
+ // walk through all blocks skipping the free ones
+ #if MI_DEBUG>1
+ size_t used_count = 0;
+ #endif
+ uint8_t* block = pstart;
+ for (size_t i = 0; i < bmapsize; i++) {
+ if (free_map[i] == 0) {
+ // every block is in use
+ for (size_t j = 0; j < MI_INTPTR_BITS; j++) {
+ #if MI_DEBUG>1
+ used_count++;
+ #endif
+ if (!visitor(heap, area, block, ubsize, arg)) return false;
+ block += bsize;
+ }
+ }
+ else {
+ // visit the used blocks in the mask
+ uintptr_t m = ~free_map[i]; // invert: set bits are now the used blocks
+ while (m != 0) {
+ #if MI_DEBUG>1
+ used_count++;
+ #endif
+ size_t bitidx = mi_ctz(m);
+ if (!visitor(heap, area, block + (bitidx * bsize), ubsize, arg)) return false;
+ m &= m - 1; // clear least significant bit
+ }
+ block += bsize * MI_INTPTR_BITS;
+ }
+ }
+ mi_assert_internal(page->used == used_count);
+ return true;
+}
+
+
+
+// Separate struct to keep `mi_page_t` out of the public interface
+typedef struct mi_heap_area_ex_s {
+ mi_heap_area_t area; // the public area descriptor
+ mi_page_t* page; // the internal page backing this area
+} mi_heap_area_ex_t;
+
+// Visitor over extended heap areas; return false to stop visiting.
+typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);
+
+// Page visitor that adapts a page into an extended area and forwards
+// to the area visitor passed via `vfun`.
+static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
+ mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
+ mi_heap_area_ex_t xarea;
+ xarea.page = page;
+ _mi_heap_area_init(&xarea.area, page);
+ return fun(heap, &xarea, arg);
+}
+
+// Visit all heap pages as areas; stops early when the visitor returns false.
+static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
+ if (visitor == NULL) return false;
+ return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
+}
+
+// Just to pass arguments through `mi_heap_visit_areas` to the area visitor.
+typedef struct mi_visit_blocks_args_s {
+ bool visit_blocks; // also visit individual blocks inside each area?
+ mi_block_visit_fun* visitor; // user visitor callback
+ void* arg; // user argument forwarded to `visitor`
+} mi_visit_blocks_args_t;
+
+// Area visitor: first reports the area itself (block == NULL), then
+// optionally visits the individual blocks inside it.
+static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
+ mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
+ if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
+ if (args->visit_blocks) {
+ return _mi_heap_area_visit_blocks(&xarea->area, xarea->page, args->visitor, args->arg);
+ }
+ else {
+ return true;
+ }
+}
+
+// Visit all blocks in a heap: calls `visitor` once per area (block == NULL)
+// and, when `visit_blocks` is set, once per live block in that area.
+bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
+ mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
+ return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
+}
diff --git a/compat/mimalloc/init.c b/compat/mimalloc/init.c
new file mode 100644
index 00000000000000..c6cca89da9c5db
--- /dev/null
+++ b/compat/mimalloc/init.c
@@ -0,0 +1,715 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"
+
+#include <string.h> // memcpy, memset
+#include <stdlib.h> // atexit
+
+
+// Empty page used to initialize the small free pages array
+const mi_page_t _mi_page_empty = {
+ 0,
+ false, false, false, false,
+ 0, // capacity
+ 0, // reserved capacity
+ { 0 }, // flags
+ false, // is_zero
+ 0, // retire_expire
+ NULL, // free
+ NULL, // local_free
+ 0, // used
+ 0, // block size shift
+ 0, // heap tag
+ 0, // block_size
+ NULL, // page_start
+ #if (MI_PADDING || MI_ENCODE_FREELIST)
+ { 0, 0 },
+ #endif
+ MI_ATOMIC_VAR_INIT(0), // xthread_free
+ MI_ATOMIC_VAR_INIT(0), // xheap
+ NULL, NULL
+ , { 0 } // padding
+};
+
+#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
+
+#if (MI_SMALL_WSIZE_MAX==128)
+#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
+#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
+#elif (MI_PADDING>0)
+#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
+#else
+#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
+#endif
+#else
+#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX"
+#endif
+
+// Empty page queues for every bin
+#define QNULL(sz) { NULL, NULL, (sz)*sizeof(uintptr_t) }
+#define MI_PAGE_QUEUES_EMPTY \
+ { QNULL(1), \
+ QNULL( 1), QNULL( 2), QNULL( 3), QNULL( 4), QNULL( 5), QNULL( 6), QNULL( 7), QNULL( 8), /* 8 */ \
+ QNULL( 10), QNULL( 12), QNULL( 14), QNULL( 16), QNULL( 20), QNULL( 24), QNULL( 28), QNULL( 32), /* 16 */ \
+ QNULL( 40), QNULL( 48), QNULL( 56), QNULL( 64), QNULL( 80), QNULL( 96), QNULL( 112), QNULL( 128), /* 24 */ \
+ QNULL( 160), QNULL( 192), QNULL( 224), QNULL( 256), QNULL( 320), QNULL( 384), QNULL( 448), QNULL( 512), /* 32 */ \
+ QNULL( 640), QNULL( 768), QNULL( 896), QNULL( 1024), QNULL( 1280), QNULL( 1536), QNULL( 1792), QNULL( 2048), /* 40 */ \
+ QNULL( 2560), QNULL( 3072), QNULL( 3584), QNULL( 4096), QNULL( 5120), QNULL( 6144), QNULL( 7168), QNULL( 8192), /* 48 */ \
+ QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
+ QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
+ QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
+ QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
+ QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }
+
+#define MI_STAT_COUNT_NULL() {0,0,0}
+
+// Empty statistics
+#define MI_STATS_NULL \
+ MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
+ { 0 }, { 0 }, \
+ MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
+ MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
+ { 0 }, { 0 }, { 0 }, { 0 }, \
+ { 0 }, { 0 }, { 0 }, { 0 }, \
+ \
+ { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, \
+ MI_INIT4(MI_STAT_COUNT_NULL), \
+ { 0 }, { 0 }, { 0 }, { 0 }, \
+ \
+ { MI_INIT4(MI_STAT_COUNT_NULL) }, \
+ { { 0 }, { 0 }, { 0 }, { 0 } }, \
+ \
+ { MI_INIT74(MI_STAT_COUNT_NULL) }, \
+ { MI_INIT74(MI_STAT_COUNT_NULL) }
+
+
+// Empty slice span queues for every bin
+#define SQNULL(sz) { NULL, NULL, sz }
+#define MI_SEGMENT_SPAN_QUEUES_EMPTY \
+ { SQNULL(1), \
+ SQNULL( 1), SQNULL( 2), SQNULL( 3), SQNULL( 4), SQNULL( 5), SQNULL( 6), SQNULL( 7), SQNULL( 10), /* 8 */ \
+ SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \
+ SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \
+ SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \
+ SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ }
+
+
+// --------------------------------------------------------
+// Statically allocate an empty heap as the initial
+// thread local value for the default heap,
+// and statically allocate the backing heap for the main
+// thread so it can function without doing any allocation
+// itself (as accessing a thread local for the first time
+// may lead to allocation itself on some platforms)
+// --------------------------------------------------------
+
+mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
+ NULL,
+ MI_ATOMIC_VAR_INIT(NULL),
+ 0, // tid
+ 0, // cookie
+ 0, // arena id
+ { 0, 0 }, // keys
+ { {0}, {0}, 0, true }, // random
+ 0, // page count
+ MI_BIN_FULL, 0, // page retired min/max
+ 0, 0, // generic count
+ NULL, // next
+ false, // can reclaim
+ 0, // tag
+ #if MI_GUARDED
+ 0, 0, 0, 1, // count is 1 so we never write to it (see `internal.h:mi_heap_malloc_use_guarded`)
+ #endif
+ MI_SMALL_PAGES_EMPTY,
+ MI_PAGE_QUEUES_EMPTY
+};
+
+static mi_decl_cache_align mi_subproc_t mi_subproc_default;
+
+#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
+
+mi_decl_cache_align static const mi_tld_t tld_empty = {
+ 0,
+ false,
+ NULL, NULL,
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, tld_empty_stats }, // segments
+ { MI_STAT_VERSION, MI_STATS_NULL } // stats
+};
+
+// Return the id of the current thread (via the OS primitive layer).
+mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
+ return _mi_prim_thread_id();
+}
+
+// the thread-local default heap for allocation
+mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
+
+extern mi_decl_hidden mi_heap_t _mi_heap_main;
+
+static mi_decl_cache_align mi_tld_t tld_main = {
+ 0, false,
+ &_mi_heap_main, & _mi_heap_main,
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, &tld_main.stats }, // segments
+ { MI_STAT_VERSION, MI_STATS_NULL } // stats
+};
+
+mi_decl_cache_align mi_heap_t _mi_heap_main = {
+ &tld_main,
+ MI_ATOMIC_VAR_INIT(NULL),
+ 0, // thread id
+ 0, // initial cookie
+ 0, // arena id
+ { 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)
+ { {0x846ca68b}, {0}, 0, true }, // random
+ 0, // page count
+ MI_BIN_FULL, 0, // page retired min/max
+ 0, 0, // generic count
+ NULL, // next heap
+ false, // can reclaim
+ 0, // tag
+ #if MI_GUARDED
+ 0, 0, 0, 0,
+ #endif
+ MI_SMALL_PAGES_EMPTY,
+ MI_PAGE_QUEUES_EMPTY
+};
+
+bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.
+
+mi_stats_t _mi_stats_main = { MI_STAT_VERSION, MI_STATS_NULL };
+
+#if MI_GUARDED
+// Set the sampling rate for guarded allocations on `heap`;
+// `seed` randomizes the first sample point (0 draws a random seed).
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
+ heap->guarded_sample_rate = sample_rate;
+ heap->guarded_sample_count = sample_rate; // count down samples
+ if (heap->guarded_sample_rate > 1) {
+ if (seed == 0) {
+ seed = _mi_heap_random_next(heap);
+ }
+ heap->guarded_sample_count = (seed % heap->guarded_sample_rate) + 1; // start at random count between 1 and `sample_rate`
+ }
+}
+
+// Set the size range of allocations eligible for guarding;
+// `max` is clamped up to at least `min`.
+mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
+ heap->guarded_size_min = min;
+ heap->guarded_size_max = (min > max ? min : max);
+}
+
+// Initialize a heap's guarded-allocation settings from the global options.
+void _mi_heap_guarded_init(mi_heap_t* heap) {
+ mi_heap_guarded_set_sample_rate(heap,
+ (size_t)mi_option_get_clamp(mi_option_guarded_sample_rate, 0, LONG_MAX),
+ (size_t)mi_option_get(mi_option_guarded_sample_seed));
+ mi_heap_guarded_set_size_bound(heap,
+ (size_t)mi_option_get_clamp(mi_option_guarded_min, 0, LONG_MAX),
+ (size_t)mi_option_get_clamp(mi_option_guarded_max, 0, LONG_MAX) );
+}
+#else
+// Stubs when MI_GUARDED is disabled: keep the exported API but do nothing.
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
+ MI_UNUSED(heap); MI_UNUSED(sample_rate); MI_UNUSED(seed);
+}
+
+mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
+ MI_UNUSED(heap); MI_UNUSED(min); MI_UNUSED(max);
+}
+void _mi_heap_guarded_init(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+}
+#endif
+
+
+// One-time initialization of the statically allocated main heap;
+// idempotent: a non-zero cookie marks the heap as already initialized.
+static void mi_heap_main_init(void) {
+ if (_mi_heap_main.cookie == 0) {
+ _mi_heap_main.thread_id = _mi_thread_id();
+ _mi_heap_main.cookie = 1; // mark as initializing (non-zero) before drawing randomness
+ #if defined(_WIN32) && !defined(MI_SHARED_LIB)
+ _mi_random_init_weak(&_mi_heap_main.random); // prevent allocation failure during bcrypt dll initialization with static linking
+ #else
+ _mi_random_init(&_mi_heap_main.random);
+ #endif
+ _mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main);
+ _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
+ _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
+ mi_lock_init(&mi_subproc_default.abandoned_os_lock);
+ mi_lock_init(&mi_subproc_default.abandoned_os_visit_lock);
+ _mi_heap_guarded_init(&_mi_heap_main);
+ }
+}
+
+// Return the main heap, initializing it on first use.
+mi_heap_t* _mi_heap_main_get(void) {
+ mi_heap_main_init();
+ return &_mi_heap_main;
+}
+
+/* -----------------------------------------------------------
+ Sub process
+----------------------------------------------------------- */
+
+// The main sub-process id (NULL maps to `mi_subproc_default`, see `_mi_subproc_from_id`).
+mi_subproc_id_t mi_subproc_main(void) {
+ return NULL;
+}
+
+// Create a new sub-process (allocated in arena meta data);
+// returns NULL on allocation failure.
+mi_subproc_id_t mi_subproc_new(void) {
+ mi_memid_t memid = _mi_memid_none();
+ mi_subproc_t* subproc = (mi_subproc_t*)_mi_arena_meta_zalloc(sizeof(mi_subproc_t), &memid);
+ if (subproc == NULL) return NULL;
+ subproc->memid = memid; // remember how it was allocated for the free in `mi_subproc_delete`
+ subproc->abandoned_os_list = NULL;
+ mi_lock_init(&subproc->abandoned_os_lock);
+ mi_lock_init(&subproc->abandoned_os_visit_lock);
+ return subproc;
+}
+
+// Resolve a sub-process id to its struct; NULL means the default sub-process.
+mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id) {
+ return (subproc_id == NULL ? &mi_subproc_default : (mi_subproc_t*)subproc_id);
+}
+
+// Delete a sub-process; a no-op for the default sub-process (NULL id)
+// or when it still has abandoned OS segments.
+void mi_subproc_delete(mi_subproc_id_t subproc_id) {
+ if (subproc_id == NULL) return;
+ mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id);
+ // check if there are no abandoned segments still..
+ bool safe_to_delete = false;
+ mi_lock(&subproc->abandoned_os_lock) {
+ if (subproc->abandoned_os_list == NULL) {
+ safe_to_delete = true;
+ }
+ }
+ if (!safe_to_delete) return;
+ // safe to release
+ // todo: should we refcount subprocesses?
+ mi_lock_done(&subproc->abandoned_os_lock);
+ mi_lock_done(&subproc->abandoned_os_visit_lock);
+ _mi_arena_meta_free(subproc, subproc->memid, sizeof(mi_subproc_t));
+}
+
+// Move the current thread into the given sub-process;
+// only allowed while the thread still belongs to the default sub-process.
+void mi_subproc_add_current_thread(mi_subproc_id_t subproc_id) {
+ mi_heap_t* heap = mi_heap_get_default();
+ if (heap == NULL) return;
+ mi_assert(heap->tld->segments.subproc == &mi_subproc_default);
+ if (heap->tld->segments.subproc != &mi_subproc_default) return;
+ heap->tld->segments.subproc = _mi_subproc_from_id(subproc_id);
+}
+
+
+
+/* -----------------------------------------------------------
+ Initialization and freeing of the thread local heaps
+----------------------------------------------------------- */
+
+// note: in x64 in release build `sizeof(mi_thread_data_t)` is under 4KiB (= OS page size).
+typedef struct mi_thread_data_s {
+ mi_heap_t heap; // must come first due to cast in `_mi_heap_done`
+ mi_tld_t tld;
+ mi_memid_t memid; // must come last due to zero'ing (reused entries are zero'd only up to this field, see `mi_thread_data_zalloc`)
+} mi_thread_data_t;
+
+
+// Thread meta-data is allocated directly from the OS. For
+// some programs that do not use thread pools and allocate and
+// destroy many OS threads, this may cause too much overhead
+// per thread so we maintain a small cache of recently freed metadata.
+
+// small lock-free cache of recently freed thread metadata entries
+#define TD_CACHE_SIZE (32)
+static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];
+
+// Allocate zero'd thread metadata: first try the cache, otherwise
+// allocate directly from the OS. Returns NULL only when out of memory.
+static mi_thread_data_t* mi_thread_data_zalloc(void) {
+ // try to find thread metadata in the cache
+ mi_thread_data_t* td = NULL;
+ for (int i = 0; i < TD_CACHE_SIZE; i++) {
+ td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
+ if (td != NULL) {
+ // found cached allocation, try use it (exchange so only one thread claims it)
+ td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
+ if (td != NULL) {
+ _mi_memzero(td, offsetof(mi_thread_data_t,memid)); // keep `memid` intact for the eventual OS free
+ return td;
+ }
+ }
+ }
+
+ // if that fails, allocate as meta data
+ mi_memid_t memid;
+ td = (mi_thread_data_t*)_mi_os_zalloc(sizeof(mi_thread_data_t), &memid);
+ if (td == NULL) {
+ // if this fails, try once more. (issue #257)
+ td = (mi_thread_data_t*)_mi_os_zalloc(sizeof(mi_thread_data_t), &memid);
+ if (td == NULL) {
+ // really out of memory
+ _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
+ return NULL;
+ }
+ }
+ td->memid = memid;
+ return td;
+}
+
+// Free thread metadata: try to stash it in an empty cache slot,
+// otherwise return it to the OS.
+static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
+ // try to add the thread metadata to the cache
+ for (int i = 0; i < TD_CACHE_SIZE; i++) {
+ mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
+ if (td == NULL) {
+ mi_thread_data_t* expected = NULL;
+ if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) {
+ return; // cached; ownership transferred to the cache slot
+ }
+ }
+ }
+ // if that fails, just free it directly
+ _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid);
+}
+
+// Free all thread metadata held in the cache (used on forced collect
+// at program exit or shared-library unload).
+void _mi_thread_data_collect(void) {
+ // free all thread metadata from the cache
+ for (int i = 0; i < TD_CACHE_SIZE; i++) {
+ mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
+ if (td != NULL) {
+ // exchange so only one thread frees a given slot
+ td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
+ if (td != NULL) {
+ _mi_os_free(td, sizeof(mi_thread_data_t), td->memid);
+ }
+ }
+ }
+}
+
+// Initialize the thread local default heap, called from `mi_thread_init`.
+// Returns true when the default heap was already initialized,
+// false when initialization was performed by this call.
+static bool _mi_thread_heap_init(void) {
+ if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true;
+ if (_mi_is_main_thread()) {
+ // mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization
+ // the main heap is statically allocated
+ mi_heap_main_init();
+ _mi_heap_set_default_direct(&_mi_heap_main);
+ //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap());
+ }
+ else {
+ // use `_mi_os_alloc` to allocate directly from the OS
+ mi_thread_data_t* td = mi_thread_data_zalloc();
+ if (td == NULL) return false;
+
+ mi_tld_t* tld = &td->tld;
+ mi_heap_t* heap = &td->heap;
+ _mi_tld_init(tld, heap); // must be before `_mi_heap_init`
+ _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */);
+ _mi_heap_set_default_direct(heap);
+ }
+ return false;
+}
+
+// initialize thread local data
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
+ _mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t));
+ tld->heap_backing = bheap;
+ tld->heaps = NULL;
+ tld->segments.subproc = &mi_subproc_default;
+ tld->segments.stats = &tld->stats;
+}
+
+// Free the thread local default heap (called from `mi_thread_done`)
+static bool _mi_thread_heap_done(mi_heap_t* heap) {
+ if (!mi_heap_is_initialized(heap)) return true;
+
+ // reset default heap
+ _mi_heap_set_default_direct(_mi_is_main_thread() ? &_mi_heap_main : (mi_heap_t*)&_mi_heap_empty);
+
+ // switch to backing heap
+ heap = heap->tld->heap_backing;
+ if (!mi_heap_is_initialized(heap)) return false;
+
+ // delete all non-backing heaps in this thread
+ mi_heap_t* curr = heap->tld->heaps;
+ while (curr != NULL) {
+ mi_heap_t* next = curr->next; // save `next` as `curr` will be freed
+ if (curr != heap) {
+ mi_assert_internal(!mi_heap_is_backing(curr));
+ mi_heap_delete(curr);
+ }
+ curr = next;
+ }
+ mi_assert_internal(heap->tld->heaps == heap && heap->next == NULL);
+ mi_assert_internal(mi_heap_is_backing(heap));
+
+ // collect if not the main thread
+ if (heap != &_mi_heap_main) {
+ _mi_heap_collect_abandon(heap);
+ }
+
+ // merge stats
+ _mi_stats_done(&heap->tld->stats);
+
+ // free if not the main thread
+ if (heap != &_mi_heap_main) {
+ // the following assertion does not always hold for huge segments as those are always treated
+ // as abandoned: one may allocate it in one thread, but deallocate in another in which case
+ // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363
+ // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
+ mi_thread_data_free((mi_thread_data_t*)heap);
+ }
+ else {
+ #if 0
+ // never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
+ // there may still be delete/free calls after the mi_fls_done is called. Issue #207
+ _mi_heap_destroy_pages(heap);
+ mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main);
+ #endif
+ }
+ return false;
+}
+
+
+
+// --------------------------------------------------------
+// Try to run `mi_thread_done()` automatically so any memory
+// owned by the thread but not yet released can be abandoned
+// and re-owned by another thread.
+//
+// 1. windows dynamic library:
+// call from DllMain on DLL_THREAD_DETACH
+// 2. windows static library:
+// use `FlsAlloc` to call a destructor when the thread is done
+// 3. unix, pthreads:
+// use a pthread key to call a destructor when a pthread is done
+//
+// In the last two cases we also need to call `mi_process_init`
+// to set up the thread local keys.
+// --------------------------------------------------------
+
+// Set up handlers so `mi_thread_done` is called automatically
+static void mi_process_setup_auto_thread_done(void) {
+ static bool tls_initialized = false; // fine if it races
+ if (tls_initialized) return;
+ tls_initialized = true;
+ _mi_prim_thread_init_auto_done();
+ _mi_heap_set_default_direct(&_mi_heap_main);
+}
+
+
+bool _mi_is_main_thread(void) {
+ return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id());
+}
+
+static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1);
+
+size_t _mi_current_thread_count(void) {
+ return mi_atomic_load_relaxed(&thread_count);
+}
+
+// This is called from the `mi_malloc_generic`
+void mi_thread_init(void) mi_attr_noexcept
+{
+ // ensure our process has started already
+ mi_process_init();
+
+ // initialize the thread local default heap
+ // (this will call `_mi_heap_set_default_direct` and thus set the
+ // fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
+ if (_mi_thread_heap_init()) return; // returns true if already initialized
+
+ _mi_stat_increase(&_mi_stats_main.threads, 1);
+ mi_atomic_increment_relaxed(&thread_count);
+ //_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id());
+}
+
+void mi_thread_done(void) mi_attr_noexcept {
+ _mi_thread_done(NULL);
+}
+
+void _mi_thread_done(mi_heap_t* heap)
+{
+ // calling with NULL implies using the default heap
+ if (heap == NULL) {
+ heap = mi_prim_get_default_heap();
+ if (heap == NULL) return;
+ }
+
+ // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699)
+ if (!mi_heap_is_initialized(heap)) {
+ return;
+ }
+
+ // adjust stats
+ mi_atomic_decrement_relaxed(&thread_count);
+ _mi_stat_decrease(&_mi_stats_main.threads, 1);
+
+ // check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
+ if (heap->thread_id != _mi_thread_id()) return;
+
+ // abandon the thread local heap
+ if (_mi_thread_heap_done(heap)) return; // returns true if already ran
+}
+
+void _mi_heap_set_default_direct(mi_heap_t* heap) {
+ mi_assert_internal(heap != NULL);
+ #if defined(MI_TLS_SLOT)
+ mi_prim_tls_slot_set(MI_TLS_SLOT,heap);
+ #elif defined(MI_TLS_PTHREAD_SLOT_OFS)
+ *mi_prim_tls_pthread_heap_slot() = heap;
+ #elif defined(MI_TLS_PTHREAD)
+ // we use _mi_heap_default_key
+ #else
+ _mi_heap_default = heap;
+ #endif
+
+ // ensure the default heap is passed to `_mi_thread_done`
+ // setting to a non-NULL value also ensures `mi_thread_done` is called.
+ _mi_prim_thread_associate_default_heap(heap);
+}
+
+void mi_thread_set_in_threadpool(void) mi_attr_noexcept {
+ // nothing
+}
+
+// --------------------------------------------------------
+// Run functions on process init/done, and thread init/done
+// --------------------------------------------------------
+static bool os_preloading = true; // true until this module is initialized
+
+// Returns true if this module has not been initialized; Don't use C runtime routines until it returns false.
+bool mi_decl_noinline _mi_preloading(void) {
+ return os_preloading;
+}
+
+// Returns true if mimalloc was redirected
+mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept {
+ return _mi_is_redirected();
+}
+
+// Called once by the process loader from `src/prim/prim.c`
+void _mi_auto_process_init(void) {
+ mi_heap_main_init();
+ #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
+ volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
+ if (dummy == NULL) return; // use dummy or otherwise the access may get optimized away (issue #697)
+ #endif
+ os_preloading = false;
+ mi_assert_internal(_mi_is_main_thread());
+ _mi_options_init();
+ mi_process_setup_auto_thread_done();
+ mi_process_init();
+ if (_mi_is_redirected()) _mi_verbose_message("malloc is redirected.\n");
+
+ // show message from the redirector (if present)
+ const char* msg = NULL;
+ _mi_allocator_init(&msg);
+ if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) {
+ _mi_fputs(NULL,NULL,NULL,msg);
+ }
+
+ // reseed random
+ _mi_random_reinit_if_weak(&_mi_heap_main.random);
+}
+
+#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
+#include <intrin.h>
+mi_decl_cache_align bool _mi_cpu_has_fsrm = false;
+mi_decl_cache_align bool _mi_cpu_has_erms = false;
+
+static void mi_detect_cpu_features(void) {
+ // FSRM for fast short rep movsb/stosb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017))
+ // EMRS for fast enhanced rep movsb/stosb support
+ int32_t cpu_info[4];
+ __cpuid(cpu_info, 7);
+ _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see <https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
+ _mi_cpu_has_erms = ((cpu_info[1] & (1 << 9)) != 0); // bit 9 of EBX : see <https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
+}
+#else
+static void mi_detect_cpu_features(void) {
+ // nothing
+}
+#endif
+
+// Initialize the process; called by thread_init or the process loader
+void mi_process_init(void) mi_attr_noexcept {
+ // ensure we are called once
+ static mi_atomic_once_t process_init;
+ #if _MSC_VER < 1920
+ mi_heap_main_init(); // vs2017 can dynamically re-initialize _mi_heap_main
+ #endif
+ if (!mi_atomic_once(&process_init)) return;
+ _mi_process_is_initialized = true;
+ _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
+ mi_process_setup_auto_thread_done();
+
+ mi_detect_cpu_features();
+ _mi_os_init();
+ mi_heap_main_init();
+ mi_thread_init();
+
+ #if defined(_WIN32)
+ // On windows, when building as a static lib the FLS cleanup happens too early for the main thread.
+ // To avoid this, set the FLS value for the main thread to NULL so the fls cleanup
+ // will not call _mi_thread_done on the (still executing) main thread. See issue #508.
+ _mi_prim_thread_associate_default_heap(NULL);
+ #endif
+
+ mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL)
+ mi_track_init();
+
+ if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
+ size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024);
+ int reserve_at = (int)mi_option_get_clamp(mi_option_reserve_huge_os_pages_at, -1, INT_MAX);
+ if (reserve_at != -1) {
+ mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500);
+ } else {
+ mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
+ }
+ }
+ if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
+ long ksize = mi_option_get(mi_option_reserve_os_memory);
+ if (ksize > 0) {
+ mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */);
+ }
+ }
+}
+
+// Called when the process is done (cdecl as it is used with `at_exit` on some platforms)
+void mi_cdecl mi_process_done(void) mi_attr_noexcept {
+ // only shutdown if we were initialized
+ if (!_mi_process_is_initialized) return;
+ // ensure we are called once
+ static bool process_done = false;
+ if (process_done) return;
+ process_done = true;
+
+ // get the default heap so we don't need to access thread locals anymore
+ mi_heap_t* heap = mi_prim_get_default_heap(); // use prim to not initialize any heap
+ mi_assert_internal(heap != NULL);
+
+ // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
+ _mi_prim_thread_done_auto_done();
+
+
+ #ifndef MI_SKIP_COLLECT_ON_EXIT
+ #if (MI_DEBUG || !defined(MI_SHARED_LIB))
+ // free all memory if possible on process exit. This is not needed for a stand-alone process
+ // but should be done if mimalloc is statically linked into another shared library which
+ // is repeatedly loaded/unloaded, see issue #281.
+ mi_heap_collect(heap, true /* force */ );
+ #endif
+ #endif
+
+ // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free
+ // since after process_done there might still be other code running that calls `free` (like at_exit routines,
+ // or C-runtime termination code).
+ if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
+ mi_heap_collect(heap, true /* force */);
+ _mi_heap_unsafe_destroy_all(heap); // forcefully release all memory held by all heaps (of this thread only!)
+ _mi_arena_unsafe_destroy_all();
+ _mi_segment_map_unsafe_destroy();
+ }
+
+ if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
+ mi_stats_print(NULL);
+ }
+ _mi_allocator_done();
+ _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
+ os_preloading = true; // don't call the C runtime anymore
+}
+
+void mi_cdecl _mi_auto_process_done(void) mi_attr_noexcept {
+ if (_mi_option_get_fast(mi_option_destroy_on_exit)>1) return;
+ mi_process_done();
+}
diff --git a/compat/mimalloc/libc.c b/compat/mimalloc/libc.c
new file mode 100644
index 00000000000000..52d095eb240dc1
--- /dev/null
+++ b/compat/mimalloc/libc.c
@@ -0,0 +1,334 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// --------------------------------------------------------
+// This module defines various std libc functions to reduce
+// the dependency on libc, and also prevent errors caused
+// by some libc implementations when called before `main`
+// executes (due to malloc redirection)
+// --------------------------------------------------------
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h" // mi_prim_getenv
+
+char _mi_toupper(char c) {
+ if (c >= 'a' && c <= 'z') return (c - 'a' + 'A');
+ else return c;
+}
+
+int _mi_strnicmp(const char* s, const char* t, size_t n) {
+ if (n == 0) return 0;
+ for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) {
+ if (_mi_toupper(*s) != _mi_toupper(*t)) break;
+ }
+ return (n == 0 ? 0 : *s - *t);
+}
+
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size) {
+ if (dest==NULL || src==NULL || dest_size == 0) return;
+ // copy until end of src, or when dest is (almost) full
+ while (*src != 0 && dest_size > 1) {
+ *dest++ = *src++;
+ dest_size--;
+ }
+ // always zero terminate
+ *dest = 0;
+}
+
+void _mi_strlcat(char* dest, const char* src, size_t dest_size) {
+ if (dest==NULL || src==NULL || dest_size == 0) return;
+ // find end of string in the dest buffer
+ while (*dest != 0 && dest_size > 1) {
+ dest++;
+ dest_size--;
+ }
+ // and catenate
+ _mi_strlcpy(dest, src, dest_size);
+}
+
+size_t _mi_strlen(const char* s) {
+ if (s==NULL) return 0;
+ size_t len = 0;
+ while(s[len] != 0) { len++; }
+ return len;
+}
+
+size_t _mi_strnlen(const char* s, size_t max_len) {
+ if (s==NULL) return 0;
+ size_t len = 0;
+ while(s[len] != 0 && len < max_len) { len++; }
+ return len;
+}
+
+#ifdef MI_NO_GETENV
+bool _mi_getenv(const char* name, char* result, size_t result_size) {
+ MI_UNUSED(name);
+ MI_UNUSED(result);
+ MI_UNUSED(result_size);
+ return false;
+}
+#else
+bool _mi_getenv(const char* name, char* result, size_t result_size) {
+ if (name==NULL || result == NULL || result_size < 64) return false;
+ return _mi_prim_getenv(name,result,result_size);
+}
+#endif
+
+// --------------------------------------------------------
+// Define our own limited `_mi_vsnprintf` and `_mi_snprintf`
+// This is mostly to avoid calling these when libc is not yet
+// initialized (and to reduce dependencies)
+//
+// format: d i, p x u, s
+// prec: z l ll L
+// width: 10
+// align-left: -
+// fill: 0
+// plus: +
+// --------------------------------------------------------
+
+static void mi_outc(char c, char** out, char* end) {
+ char* p = *out;
+ if (p >= end) return;
+ *p = c;
+ *out = p + 1;
+}
+
+static void mi_outs(const char* s, char** out, char* end) {
+ if (s == NULL) return;
+ char* p = *out;
+ while (*s != 0 && p < end) {
+ *p++ = *s++;
+ }
+ *out = p;
+}
+
+static void mi_out_fill(char fill, size_t len, char** out, char* end) {
+ char* p = *out;
+ for (size_t i = 0; i < len && p < end; i++) {
+ *p++ = fill;
+ }
+ *out = p;
+}
+
+static void mi_out_alignright(char fill, char* start, size_t len, size_t extra, char* end) {
+ if (len == 0 || extra == 0) return;
+ if (start + len + extra >= end) return;
+ // move `len` characters to the right (in reverse since it can overlap)
+ for (size_t i = 1; i <= len; i++) {
+ start[len + extra - i] = start[len - i];
+ }
+ // and fill the start
+ for (size_t i = 0; i < extra; i++) {
+ start[i] = fill;
+ }
+}
+
+
+static void mi_out_num(uintmax_t x, size_t base, char prefix, char** out, char* end)
+{
+ if (x == 0 || base == 0 || base > 16) {
+ if (prefix != 0) { mi_outc(prefix, out, end); }
+ mi_outc('0',out,end);
+ }
+ else {
+ // output digits in reverse
+ char* start = *out;
+ while (x > 0) {
+ char digit = (char)(x % base);
+ mi_outc((digit <= 9 ? '0' + digit : 'A' + digit - 10),out,end);
+ x = x / base;
+ }
+ if (prefix != 0) {
+ mi_outc(prefix, out, end);
+ }
+ size_t len = *out - start;
+ // and reverse in-place
+ for (size_t i = 0; i < (len / 2); i++) {
+ char c = start[len - i - 1];
+ start[len - i - 1] = start[i];
+ start[i] = c;
+ }
+ }
+}
+
+
+#define MI_NEXTC() c = *in; if (c==0) break; in++;
+
+int _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) {
+ if (buf == NULL || bufsize == 0 || fmt == NULL) return 0;
+ buf[bufsize - 1] = 0;
+ char* const end = buf + (bufsize - 1);
+ const char* in = fmt;
+ char* out = buf;
+ while (true) {
+ if (out >= end) break;
+ char c;
+ MI_NEXTC();
+ if (c != '%') {
+ if ((c >= ' ' && c <= '~') || c=='\n' || c=='\r' || c=='\t') { // output visible ascii or standard control only
+ mi_outc(c, &out, end);
+ }
+ }
+ else {
+ MI_NEXTC();
+ char fill = ' ';
+ size_t width = 0;
+ char numtype = 'd';
+ char numplus = 0;
+ bool alignright = true;
+ if (c == '+' || c == ' ') { numplus = c; MI_NEXTC(); }
+ if (c == '-') { alignright = false; MI_NEXTC(); }
+ if (c == '0') { fill = '0'; MI_NEXTC(); }
+ if (c >= '1' && c <= '9') {
+ width = (c - '0'); MI_NEXTC();
+ while (c >= '0' && c <= '9') {
+ width = (10 * width) + (c - '0'); MI_NEXTC();
+ }
+ if (c == 0) break; // extra check due to while
+ }
+ if (c == 'z' || c == 't' || c == 'L') { numtype = c; MI_NEXTC(); }
+ else if (c == 'l') {
+ numtype = c; MI_NEXTC();
+ if (c == 'l') { numtype = 'L'; MI_NEXTC(); }
+ }
+
+ char* start = out;
+ if (c == 's') {
+ // string
+ const char* s = va_arg(args, const char*);
+ mi_outs(s, &out, end);
+ }
+ else if (c == 'p' || c == 'x' || c == 'u') {
+ // unsigned
+ uintmax_t x = 0;
+ if (c == 'x' || c == 'u') {
+ if (numtype == 'z') x = va_arg(args, size_t);
+ else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t
+ else if (numtype == 'L') x = va_arg(args, unsigned long long);
+ else if (numtype == 'l') x = va_arg(args, unsigned long);
+ else x = va_arg(args, unsigned int);
+ }
+ else if (c == 'p') {
+ x = va_arg(args, uintptr_t);
+ mi_outs("0x", &out, end);
+ start = out;
+ width = (width >= 2 ? width - 2 : 0);
+ }
+ if (width == 0 && (c == 'x' || c == 'p')) {
+ if (c == 'p') { width = 2 * (x <= UINT32_MAX ? 4 : ((x >> 16) <= UINT32_MAX ? 6 : sizeof(void*))); }
+ if (width == 0) { width = 2; }
+ fill = '0';
+ }
+ mi_out_num(x, (c == 'x' || c == 'p' ? 16 : 10), numplus, &out, end);
+ }
+ else if (c == 'i' || c == 'd') {
+ // signed
+ intmax_t x = 0;
+ if (numtype == 'z') x = va_arg(args, intptr_t );
+ else if (numtype == 't') x = va_arg(args, ptrdiff_t);
+ else if (numtype == 'L') x = va_arg(args, long long);
+ else if (numtype == 'l') x = va_arg(args, long);
+ else x = va_arg(args, int);
+ char pre = 0;
+ if (x < 0) {
+ pre = '-';
+ if (x > INTMAX_MIN) { x = -x; }
+ }
+ else if (numplus != 0) {
+ pre = numplus;
+ }
+ mi_out_num((uintmax_t)x, 10, pre, &out, end);
+ }
+ else if (c >= ' ' && c <= '~') {
+ // unknown format
+ mi_outc('%', &out, end);
+ mi_outc(c, &out, end);
+ }
+
+ // fill & align
+ mi_assert_internal(out <= end);
+ mi_assert_internal(out >= start);
+ const size_t len = out - start;
+ if (len < width) {
+ mi_out_fill(fill, width - len, &out, end);
+ if (alignright && out <= end) {
+ mi_out_alignright(fill, start, len, width - len, end);
+ }
+ }
+ }
+ }
+ mi_assert_internal(out <= end);
+ *out = 0;
+ return (int)(out - buf);
+}
+
+int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ const int written = _mi_vsnprintf(buf, buflen, fmt, args);
+ va_end(args);
+ return written;
+}
+
+
+#if MI_SIZE_SIZE == 4
+#define mi_mask_even_bits32 (0x55555555)
+#define mi_mask_even_pairs32 (0x33333333)
+#define mi_mask_even_nibbles32 (0x0F0F0F0F)
+
+// sum of all the bytes in `x` if it is guaranteed that the sum < 256!
+static size_t mi_byte_sum32(uint32_t x) {
+ // perform `x * 0x01010101`: the highest byte contains the sum of all bytes.
+ x += (x << 8);
+ x += (x << 16);
+ return (size_t)(x >> 24);
+}
+
+static size_t mi_popcount_generic32(uint32_t x) {
+ // first count each 2-bit group `a`, where: a==0b00 -> 00, a==0b01 -> 01, a==0b10 -> 01, a==0b11 -> 10
+ // in other words, `a - (a>>1)`; to do this in parallel, we need to mask to prevent spilling a bit pair
+ // into the lower bit-pair:
+ x = x - ((x >> 1) & mi_mask_even_bits32);
+ // add the 2-bit pair results
+ x = (x & mi_mask_even_pairs32) + ((x >> 2) & mi_mask_even_pairs32);
+ // add the 4-bit nibble results
+ x = (x + (x >> 4)) & mi_mask_even_nibbles32;
+ // each byte now has a count of its bits, we can sum them now:
+ return mi_byte_sum32(x);
+}
+
+mi_decl_noinline size_t _mi_popcount_generic(size_t x) {
+ return mi_popcount_generic32(x);
+}
+
+#else
+#define mi_mask_even_bits64 (0x5555555555555555)
+#define mi_mask_even_pairs64 (0x3333333333333333)
+#define mi_mask_even_nibbles64 (0x0F0F0F0F0F0F0F0F)
+
+// sum of all the bytes in `x` if it is guaranteed that the sum < 256!
+static size_t mi_byte_sum64(uint64_t x) {
+ x += (x << 8);
+ x += (x << 16);
+ x += (x << 32);
+ return (size_t)(x >> 56);
+}
+
+static size_t mi_popcount_generic64(uint64_t x) {
+ x = x - ((x >> 1) & mi_mask_even_bits64);
+ x = (x & mi_mask_even_pairs64) + ((x >> 2) & mi_mask_even_pairs64);
+ x = (x + (x >> 4)) & mi_mask_even_nibbles64;
+ return mi_byte_sum64(x);
+}
+
+mi_decl_noinline size_t _mi_popcount_generic(size_t x) {
+ return mi_popcount_generic64(x);
+}
+#endif
+
diff --git a/compat/mimalloc/mimalloc-stats.h b/compat/mimalloc/mimalloc-stats.h
new file mode 100644
index 00000000000000..12c5c9a7d6ced7
--- /dev/null
+++ b/compat/mimalloc/mimalloc-stats.h
@@ -0,0 +1,104 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_STATS_H
+#define MIMALLOC_STATS_H
+
+#include <mimalloc.h>
+#include <stdint.h>
+
+#define MI_STAT_VERSION 3 // increased on every backward incompatible change
+
+// count allocation over time
+typedef struct mi_stat_count_s {
+ int64_t total; // total allocated
+ int64_t peak; // peak allocation
+ int64_t current; // current allocation
+} mi_stat_count_t;
+
+// counters only increase
+typedef struct mi_stat_counter_s {
+ int64_t total; // total count
+} mi_stat_counter_t;
+
+#define MI_STAT_FIELDS() \
+ MI_STAT_COUNT(pages) /* count of mimalloc pages */ \
+ MI_STAT_COUNT(reserved) /* reserved memory bytes */ \
+ MI_STAT_COUNT(committed) /* committed bytes */ \
+ MI_STAT_COUNTER(reset) /* reset bytes */ \
+ MI_STAT_COUNTER(purged) /* purged bytes */ \
+ MI_STAT_COUNT(page_committed) /* committed memory inside pages */ \
+ MI_STAT_COUNT(pages_abandoned) /* abandoned pages count */ \
+ MI_STAT_COUNT(threads) /* number of threads */ \
+ MI_STAT_COUNT(malloc_normal) /* allocated bytes <= MI_LARGE_OBJ_SIZE_MAX */ \
+ MI_STAT_COUNT(malloc_huge) /* allocated bytes in huge pages */ \
+ MI_STAT_COUNT(malloc_requested) /* malloc requested bytes */ \
+ \
+ MI_STAT_COUNTER(mmap_calls) \
+ MI_STAT_COUNTER(commit_calls) \
+ MI_STAT_COUNTER(reset_calls) \
+ MI_STAT_COUNTER(purge_calls) \
+ MI_STAT_COUNTER(arena_count) /* number of memory arena's */ \
+ MI_STAT_COUNTER(malloc_normal_count) /* number of blocks <= MI_LARGE_OBJ_SIZE_MAX */ \
+ MI_STAT_COUNTER(malloc_huge_count) /* number of huge blocks */ \
+ MI_STAT_COUNTER(malloc_guarded_count) /* number of allocations with guard pages */ \
+ \
+ /* internal statistics */ \
+ MI_STAT_COUNTER(arena_rollback_count) \
+ MI_STAT_COUNTER(arena_purges) \
+ MI_STAT_COUNTER(pages_extended) /* number of page extensions */ \
+ MI_STAT_COUNTER(pages_retire) /* number of pages that are retired */ \
+ MI_STAT_COUNTER(page_searches) /* total pages searched for a fresh page */ \
+ MI_STAT_COUNTER(page_searches_count) /* searched count for a fresh page */ \
+ /* only on v1 and v2 */ \
+ MI_STAT_COUNT(segments) \
+ MI_STAT_COUNT(segments_abandoned) \
+ MI_STAT_COUNT(segments_cache) \
+ MI_STAT_COUNT(_segments_reserved) \
+ /* only on v3 */ \
+ MI_STAT_COUNTER(pages_reclaim_on_alloc) \
+ MI_STAT_COUNTER(pages_reclaim_on_free) \
+ MI_STAT_COUNTER(pages_reabandon_full) \
+ MI_STAT_COUNTER(pages_unabandon_busy_wait) \
+
+
+// Define the statistics structure
+#define MI_BIN_HUGE (73U) // see types.h
+#define MI_STAT_COUNT(stat) mi_stat_count_t stat;
+#define MI_STAT_COUNTER(stat) mi_stat_counter_t stat;
+
+typedef struct mi_stats_s
+{
+ int version;
+
+ MI_STAT_FIELDS()
+
+ // future extension
+ mi_stat_count_t _stat_reserved[4];
+ mi_stat_counter_t _stat_counter_reserved[4];
+
+ // size segregated statistics
+ mi_stat_count_t malloc_bins[MI_BIN_HUGE+1]; // allocation per size bin
+ mi_stat_count_t page_bins[MI_BIN_HUGE+1]; // pages allocated per size bin
+} mi_stats_t;
+
+#undef MI_STAT_COUNT
+#undef MI_STAT_COUNTER
+
+// Exported definitions
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+mi_decl_export void mi_stats_get( size_t stats_size, mi_stats_t* stats ) mi_attr_noexcept;
+mi_decl_export char* mi_stats_get_json( size_t buf_size, char* buf ) mi_attr_noexcept; // use mi_free to free the result if the input buf == NULL
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MIMALLOC_STATS_H
diff --git a/compat/mimalloc/mimalloc.h b/compat/mimalloc/mimalloc.h
new file mode 100644
index 00000000000000..6a926d1c802a65
--- /dev/null
+++ b/compat/mimalloc/mimalloc.h
@@ -0,0 +1,630 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2026, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_H
+#define MIMALLOC_H
+
+#define MI_MALLOC_VERSION 227 // major + 2 digits minor
+
+// ------------------------------------------------------
+// Compiler specific attributes
+// ------------------------------------------------------
+
+#ifdef __cplusplus
+ #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
+ #define mi_attr_noexcept noexcept
+ #else
+ #define mi_attr_noexcept throw()
+ #endif
+#else
+ #define mi_attr_noexcept
+#endif
+
+#if defined(__cplusplus) && (__cplusplus >= 201703)
+ #define mi_decl_nodiscard [[nodiscard]]
+#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl
+ #define mi_decl_nodiscard __attribute__((warn_unused_result))
+#elif defined(_HAS_NODISCARD)
+ #define mi_decl_nodiscard _NODISCARD
+#elif (_MSC_VER >= 1700)
+ #define mi_decl_nodiscard _Check_return_
+#else
+ #define mi_decl_nodiscard
+#endif
+
+#if defined(_MSC_VER) || defined(__MINGW32__)
+ #if !defined(MI_SHARED_LIB)
+ #define mi_decl_export
+ #elif defined(MI_SHARED_LIB_EXPORT)
+ #define mi_decl_export __declspec(dllexport)
+ #else
+ #define mi_decl_export __declspec(dllimport)
+ #endif
+ #if defined(__MINGW32__)
+ #define mi_decl_restrict
+ #define mi_attr_malloc __attribute__((malloc))
+ #else
+ #if (_MSC_VER >= 1900) && !defined(__EDG__)
+ #define mi_decl_restrict __declspec(allocator) __declspec(restrict)
+ #else
+ #define mi_decl_restrict __declspec(restrict)
+ #endif
+ #define mi_attr_malloc
+ #endif
+ #define mi_cdecl __cdecl
+ #define mi_attr_alloc_size(s)
+ #define mi_attr_alloc_size2(s1,s2)
+ #define mi_attr_alloc_align(p)
+#elif defined(__GNUC__) // includes clang and icc
+ #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT)
+ #define mi_decl_export __attribute__((visibility("default")))
+ #else
+ #define mi_decl_export
+ #endif
+ #define mi_cdecl // leads to warnings... __attribute__((cdecl))
+ #define mi_decl_restrict
+ #define mi_attr_malloc __attribute__((malloc))
+ #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
+ #define mi_attr_alloc_size(s)
+ #define mi_attr_alloc_size2(s1,s2)
+ #define mi_attr_alloc_align(p)
+ #elif defined(__INTEL_COMPILER)
+ #define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
+ #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
+ #define mi_attr_alloc_align(p)
+ #else
+ #define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
+ #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
+ #define mi_attr_alloc_align(p) __attribute__((alloc_align(p)))
+ #endif
+#else
+ #define mi_cdecl
+ #define mi_decl_export
+ #define mi_decl_restrict
+ #define mi_attr_malloc
+ #define mi_attr_alloc_size(s)
+ #define mi_attr_alloc_size2(s1,s2)
+ #define mi_attr_alloc_align(p)
+#endif
+
+// ------------------------------------------------------
+// Includes
+// ------------------------------------------------------
+
+#include "compat/posix.h"
+
+#include <stdbool.h> // bool
+#include <stdint.h> // INTPTR_MAX
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// ------------------------------------------------------
+// Standard malloc interface
+// ------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
+mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
+mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
+
+mi_decl_export void mi_free(void* p) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc;
+mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
+mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
+
+// ------------------------------------------------------
+// Extended functionality
+// ------------------------------------------------------
+#define MI_SMALL_WSIZE_MAX (128)
+#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*))
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
+mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
+mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
+
+mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept;
+
+
+// ------------------------------------------------------
+// Internals
+// ------------------------------------------------------
+
+typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
+mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept;
+
+typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg);
+mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept;
+
+typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
+mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);
+
+mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
+mi_decl_export int mi_version(void) mi_attr_noexcept;
+mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
+mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
+mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL
+mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;
+mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;
+mi_decl_export void mi_options_print(void) mi_attr_noexcept;
+
+mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs,
+ size_t* current_rss, size_t* peak_rss,
+ size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept;
+
+
+// Generally do not use the following as these are usually called automatically
+mi_decl_export void mi_process_init(void) mi_attr_noexcept;
+mi_decl_export void mi_cdecl mi_process_done(void) mi_attr_noexcept;
+mi_decl_export void mi_thread_init(void) mi_attr_noexcept;
+mi_decl_export void mi_thread_done(void) mi_attr_noexcept;
+
+
+// -------------------------------------------------------------------------------------
+// Aligned allocation
+// Note that `alignment` always follows `size` for consistency with unaligned
+// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
+// -------------------------------------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
+mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
+mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
+
+
+// -----------------------------------------------------------------
+// Return allocated block size (if the return value is not NULL)
+// -----------------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_umalloc(size_t size, size_t* block_size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_ucalloc(size_t count, size_t size, size_t* block_size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
+mi_decl_nodiscard mi_decl_export void* mi_urealloc(void* p, size_t newsize, size_t* block_size_pre, size_t* block_size_post) mi_attr_noexcept mi_attr_alloc_size(2);
+mi_decl_export void mi_ufree(void* p, size_t* block_size) mi_attr_noexcept;
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_umalloc_aligned(size_t size, size_t alignment, size_t* block_size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_uzalloc_aligned(size_t size, size_t alignment, size_t* block_size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_umalloc_small(size_t size, size_t* block_size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_uzalloc_small(size_t size, size_t* block_size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+
+
+// -------------------------------------------------------------------------------------
+// Heaps: first-class, but can only allocate from the same thread that created it.
+// -------------------------------------------------------------------------------------
+
+struct mi_heap_s;
+typedef struct mi_heap_s mi_heap_t;
+
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
+mi_decl_export void mi_heap_delete(mi_heap_t* heap);
+mi_decl_export void mi_heap_destroy(mi_heap_t* heap);
+mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
+mi_decl_export mi_heap_t* mi_heap_get_default(void);
+mi_decl_export mi_heap_t* mi_heap_get_backing(void);
+mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
+
+mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
+mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
+mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc;
+mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
+mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
+mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
+mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
+
+
+// --------------------------------------------------------------------------------
+// Zero initialized re-allocation.
+// Only valid on memory that was originally allocated with zero initialization too.
+// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc.
+// see <https://github.com/microsoft/mimalloc/issues/63>
+// --------------------------------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
+
+mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
+mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4);
+mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3);
+
+mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
+mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
+
+mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
+mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
+mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5);
+mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4);
+
+
+// ------------------------------------------------------
+// Analysis
+// ------------------------------------------------------
+
+mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
+mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
+mi_decl_export bool mi_check_owned(const void* p);
+
+// An area of heap space contains blocks of a single size.
+typedef struct mi_heap_area_s {
+ void* blocks; // start of the area containing heap blocks
+ size_t reserved; // bytes reserved for this area (virtual)
+ size_t committed; // current available bytes for this area
+ size_t used; // number of allocated blocks
+ size_t block_size; // size in bytes of each block
+ size_t full_block_size; // size in bytes of a full block including padding and metadata.
+ int heap_tag; // heap tag associated with this area
+} mi_heap_area_t;
+
+typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
+
+mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
+
+// Experimental
+mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept;
+
+mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept;
+mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept;
+
+mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
+mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;
+
+mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;
+mi_decl_export void mi_arenas_print(void) mi_attr_noexcept;
+
+// Experimental: heaps associated with specific memory arena's
+typedef int mi_arena_id_t;
+mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
+mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+
+#if MI_MALLOC_VERSION >= 182
+// Create a heap that only allocates in the specified arena
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
+#endif
+
+
+// Experimental: allow sub-processes whose memory areas stay separated (and no reclamation between them)
+// Used for example for separate interpreters in one process.
+typedef void* mi_subproc_id_t;
+mi_decl_export mi_subproc_id_t mi_subproc_main(void);
+mi_decl_export mi_subproc_id_t mi_subproc_new(void);
+mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
+mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)
+
+// Experimental: visit abandoned heap areas (that are not owned by a specific heap)
+mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
+
+// Experimental: objects followed by a guard page.
+// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
+// A seed of 0 uses a random start point. Only objects within the size bound are eligible for guard pages.
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
+mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max);
+
+// Experimental: communicate that the thread is part of a threadpool
+mi_decl_export void mi_thread_set_in_threadpool(void) mi_attr_noexcept;
+
+// Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread
+// to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will
+// fall back to `mi_heap_delete`.
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id);
+
+// deprecated
+mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
+mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
+
+
+
+// ------------------------------------------------------
+// Convenience
+// ------------------------------------------------------
+
+#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
+#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
+#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp)))
+#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp)))
+#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp)))
+#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp)))
+
+#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
+#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
+#define mi_heap_calloc_tp(hp,tp,n) ((tp*)mi_heap_calloc(hp,n,sizeof(tp)))
+#define mi_heap_mallocn_tp(hp,tp,n) ((tp*)mi_heap_mallocn(hp,n,sizeof(tp)))
+#define mi_heap_reallocn_tp(hp,p,tp,n) ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp)))
+#define mi_heap_recalloc_tp(hp,p,tp,n) ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp)))
+
+
+// ------------------------------------------------------
+// Options
+// ------------------------------------------------------
+
+typedef enum mi_option_e {
+ // stable options
+ mi_option_show_errors, // print error messages
+ mi_option_show_stats, // print statistics on termination
+ mi_option_verbose, // print verbose messages
+ // advanced options
+ mi_option_eager_commit, // eager commit segments? (after `eager_commit_delay` segments) (=1)
+ mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2)
+ mi_option_purge_decommits, // should a memory purge decommit? (=1). Set to 0 to use memory reset on a purge (instead of decommit)
+ mi_option_allow_large_os_pages, // allow use of large (2 or 4 MiB) OS pages, implies eager commit.
+ mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB pages) at startup
+ mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
+ mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use `mi_option_get_size`)
+ mi_option_deprecated_segment_cache,
+ mi_option_deprecated_page_reset,
+ mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
+ mi_option_deprecated_segment_reset,
+ mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
+ mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10)
+ mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
+ mi_option_disallow_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
+ mi_option_os_tag, // tag used for OS logging (macOS only for now) (=100)
+ mi_option_max_errors, // issue at most N error messages
+ mi_option_max_warnings, // issue at most N warning messages
+ mi_option_max_segment_reclaim, // max. percentage of the abandoned segments can be reclaimed per try (=10%)
+ mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe
+ mi_option_arena_reserve, // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`)
+ mi_option_arena_purge_mult, // multiplier for `purge_delay` for the purging delay for arenas (=10)
+ mi_option_purge_extend_delay,
+ mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1)
+ mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's)
+ mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
+ mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0)
+ mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0)
+ mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0)
+ mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+ mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
+ mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
+ mi_option_target_segments_per_thread, // experimental (=0)
+ mi_option_generic_collect, // collect heaps every N (=10000) generic allocation calls
+ mi_option_allow_thp, // allow transparent huge pages? (=1) (on Android =0 by default). Set to 0 to disable THP for the process.
+ _mi_option_last,
+ // legacy option names
+ mi_option_large_os_pages = mi_option_allow_large_os_pages,
+ mi_option_eager_region_commit = mi_option_arena_eager_commit,
+ mi_option_reset_decommits = mi_option_purge_decommits,
+ mi_option_reset_delay = mi_option_purge_delay,
+ mi_option_abandoned_page_reset = mi_option_abandoned_page_purge,
+ mi_option_limit_os_alloc = mi_option_disallow_os_alloc
+} mi_option_t;
+
+
+mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option);
+mi_decl_export void mi_option_enable(mi_option_t option);
+mi_decl_export void mi_option_disable(mi_option_t option);
+mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
+mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);
+
+mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option);
+mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max);
+mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option);
+mi_decl_export void mi_option_set(mi_option_t option, long value);
+mi_decl_export void mi_option_set_default(mi_option_t option, long value);
+
+
+// -------------------------------------------------------------------------------------------------------
+// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions.
+// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.)
+// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing.
+// -------------------------------------------------------------------------------------------------------
+
+mi_decl_export void mi_cfree(void* p) mi_attr_noexcept;
+mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;
+
+mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
+
+mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
+mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc;
+mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc;
+mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept;
+mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept;
+
+mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept;
+mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept;
+mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept;
+
+// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`.
+// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception).
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2);
+mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);
+
+#ifdef __cplusplus
+}
+#endif
+
+// ---------------------------------------------------------------------------------------------
+// Implement the C++ std::allocator interface for use in STL containers.
+// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally)
+// ---------------------------------------------------------------------------------------------
+#ifdef __cplusplus
+
+#include <cstddef>     // std::size_t
+#include <cstdint>     // PTRDIFF_MAX
+#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
+#include <type_traits> // std::true_type
+#include <utility>     // std::forward
+#endif
+
+template<class T> struct _mi_stl_allocator_common {
+  typedef T                 value_type;
+  typedef std::size_t       size_type;
+  typedef std::ptrdiff_t    difference_type;
+  typedef value_type&       reference;
+  typedef value_type const& const_reference;
+  typedef value_type*       pointer;
+  typedef value_type const* const_pointer;
+
+  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
+  using propagate_on_container_copy_assignment = std::true_type;
+  using propagate_on_container_move_assignment = std::true_type;
+  using propagate_on_container_swap            = std::true_type;
+  template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
+  template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
+  #else
+  void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
+  void destroy(pointer p) { p->~value_type(); }
+  #endif
+
+  size_type     max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
+  pointer       address(reference x) const        { return &x; }
+  const_pointer address(const_reference x) const  { return &x; }
+};
+
+template<class T> struct mi_stl_allocator : public _mi_stl_allocator_common<T> {
+  using typename _mi_stl_allocator_common<T>::size_type;
+  using typename _mi_stl_allocator_common<T>::value_type;
+  using typename _mi_stl_allocator_common<T>::pointer;
+  template <class U> struct rebind { typedef mi_stl_allocator<U> other; };
+
+  mi_stl_allocator()                                             mi_attr_noexcept = default;
+  mi_stl_allocator(const mi_stl_allocator&)                      mi_attr_noexcept = default;
+  template<class U> mi_stl_allocator(const mi_stl_allocator<U>&) mi_attr_noexcept { }
+  mi_stl_allocator  select_on_container_copy_construction() const { return *this; }
+  void              deallocate(T* p, size_type) { mi_free(p); }
+
+  #if (__cplusplus >= 201703L) // C++17
+  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
+  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
+  #else
+  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
+  #endif
+
+  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
+  using is_always_equal = std::true_type;
+  #endif
+};
+
+template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
+template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
+
+
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11
+#define MI_HAS_HEAP_STL_ALLOCATOR 1
+
+#include <memory> // std::shared_ptr
+
+// Common base class for STL allocators in a specific heap
+template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
+  using typename _mi_stl_allocator_common<T>::size_type;
+  using typename _mi_stl_allocator_common<T>::value_type;
+  using typename _mi_stl_allocator_common<T>::pointer;
+
+  _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp, [](mi_heap_t*) {}) {}    /* will not delete nor destroy the passed in heap */
+
+  #if (__cplusplus >= 201703L) // C++17
+  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
+  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
+  #else
+  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
+  #endif
+
+  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
+  using is_always_equal = std::false_type;
+  #endif
+
+  void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
+  template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }
+
+protected:
+  std::shared_ptr<mi_heap_t> heap;
+  template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
+
+  _mi_heap_stl_allocator_common() {
+    mi_heap_t* hp = mi_heap_new();
+    this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete));  /* calls heap_delete/destroy when the refcount drops to zero */
+  }
+  _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
+  template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }
+
+private:
+  static void heap_delete(mi_heap_t* hp)  { if (hp != NULL) { mi_heap_delete(hp); } }
+  static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
+};
+
+// STL allocator allocation in a specific heap
+template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
+  using typename _mi_heap_stl_allocator_common<T, false>::size_type;
+  mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { } // creates fresh heap that is deleted when the destructor is called
+  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
+  template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }
+
+  mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
+  void deallocate(T* p, size_type) { mi_free(p); }
+  template<class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
+};
+
+template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
+template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
+
+
+// STL allocator allocation in a specific heap, where `free` does nothing and
+// the heap is destroyed in one go on destruction -- use with care!
+template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
+  using typename _mi_heap_stl_allocator_common<T, true>::size_type;
+  mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
+  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
+  template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }
+
+  mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
+  void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ }
+  template<class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
+};
+
+template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
+template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
+
+#endif // C++11
+
+#endif // __cplusplus
+
+#endif
diff --git a/compat/mimalloc/mimalloc/atomic.h b/compat/mimalloc/mimalloc/atomic.h
new file mode 100644
index 00000000000000..e8bac316b3a6f3
--- /dev/null
+++ b/compat/mimalloc/mimalloc/atomic.h
@@ -0,0 +1,557 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2024 Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_ATOMIC_H
+#define MIMALLOC_ATOMIC_H
+
+// include windows.h or pthreads.h
+#if defined(_WIN32)
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+#elif !defined(__wasi__) && (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__))
+#define MI_USE_PTHREADS
+#include <pthread.h>
+#endif
+
+// --------------------------------------------------------------------------------------------
+// Atomics
+// We need to be portable between C, C++, and MSVC.
+// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
+// This is why we try to use only `uintptr_t` and `*` as atomic types.
+// To gain better insight in the range of used atomics, we use explicitly named memory order operations
+// instead of passing the memory order as a parameter.
+// -----------------------------------------------------------------------------------------------
+
+#if defined(__cplusplus)
+// Use C++ atomics
+#include <atomic>
+#define _Atomic(tp) std::atomic<tp>
+#define mi_atomic(name) std::atomic_##name
+#define mi_memory_order(name) std::memory_order_##name
+#if (__cplusplus >= 202002L) // c++20, see issue #571
+ #define MI_ATOMIC_VAR_INIT(x) x
+#elif !defined(ATOMIC_VAR_INIT)
+ #define MI_ATOMIC_VAR_INIT(x) x
+#else
+ #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
+#endif
+#elif defined(_MSC_VER)
+// Use MSVC C wrapper for C11 atomics
+#define _Atomic(tp) tp
+#define MI_ATOMIC_VAR_INIT(x) x
+#define mi_atomic(name) mi_atomic_##name
+#define mi_memory_order(name) mi_memory_order_##name
+#else
+// Use C11 atomics
+#include <stdatomic.h>
+#define mi_atomic(name) atomic_##name
+#define mi_memory_order(name) memory_order_##name
+#if (__STDC_VERSION__ >= 201710L) // c17, see issue #735
+ #define MI_ATOMIC_VAR_INIT(x) x
+#elif !defined(ATOMIC_VAR_INIT)
+ #define MI_ATOMIC_VAR_INIT(x) x
+#else
+ #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
+#endif
+#endif
+
+// Various defines for all used memory orders in mimalloc
+#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail) \
+ mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail)
+
+#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail) \
+ mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail)
+
+#define mi_atomic_load_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
+#define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
+#define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
+#define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_exchange_relaxed(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release))
+#define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel))
+#define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
+#define mi_atomic_cas_weak_acq_rel(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
+#define mi_atomic_cas_strong_release(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
+#define mi_atomic_cas_strong_acq_rel(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
+
+#define mi_atomic_add_relaxed(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_add_acq_rel(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel))
+#define mi_atomic_sub_acq_rel(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel))
+#define mi_atomic_and_acq_rel(p,x) mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel))
+#define mi_atomic_or_acq_rel(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel))
+
+#define mi_atomic_increment_relaxed(p) mi_atomic_add_relaxed(p,(uintptr_t)1)
+#define mi_atomic_decrement_relaxed(p) mi_atomic_sub_relaxed(p,(uintptr_t)1)
+#define mi_atomic_increment_acq_rel(p) mi_atomic_add_acq_rel(p,(uintptr_t)1)
+#define mi_atomic_decrement_acq_rel(p) mi_atomic_sub_acq_rel(p,(uintptr_t)1)
+
+static inline void mi_atomic_yield(void);
+static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add);
+static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
+
+
+#if defined(__cplusplus) || !defined(_MSC_VER)
+
+// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value)
+// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well
+#define mi_atomic_load_ptr_acquire(tp,p) mi_atomic_load_acquire(p)
+#define mi_atomic_load_ptr_relaxed(tp,p) mi_atomic_load_relaxed(p)
+
+// In C++ we need to add casts to help resolve templates if NULL is passed
+#if defined(__cplusplus)
+#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,(tp*)x)
+#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,(tp*)x)
+#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des)
+#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
+#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,(tp*)des)
+#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,(tp*)x)
+#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x)
+#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x)
+#else
+#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,x)
+#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,x)
+#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des)
+#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des)
+#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,des)
+#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,x)
+#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x)
+#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x)
+#endif
+
+// These are used by the statistics
+static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
+ return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
+}
+static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) {
+ const int64_t add = mi_atomic_load_relaxed((_Atomic(int64_t)*)padd);
+ if (add != 0) {
+ mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
+ }
+}
+static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
+ int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p);
+  while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, &current, x)) { /* nothing */ };
+}
+
+// Used by timers
+#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
+#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
+#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
+#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
+
+#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d)
+#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i)
+
+
+#elif defined(_MSC_VER)
+
+// Legacy MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics.
+#include <intrin.h>
+#ifdef _WIN64
+typedef LONG64 msc_intptr_t;
+#define MI_64(f) f##64
+#else
+typedef LONG msc_intptr_t;
+#define MI_64(f) f
+#endif
+
+typedef enum mi_memory_order_e {
+ mi_memory_order_relaxed,
+ mi_memory_order_consume,
+ mi_memory_order_acquire,
+ mi_memory_order_release,
+ mi_memory_order_acq_rel,
+ mi_memory_order_seq_cst
+} mi_memory_order;
+
+static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) {
+ (void)(mo);
+ return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
+}
+static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) {
+ (void)(mo);
+ return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub));
+}
+static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
+ (void)(mo);
+ return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
+}
+static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
+ (void)(mo);
+ return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
+}
+static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
+ (void)(mo1); (void)(mo2);
+ uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected));
+ if (read == *expected) {
+ return true;
+ }
+ else {
+ *expected = read;
+ return false;
+ }
+}
+static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
+ return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2);
+}
+static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) {
+ (void)(mo);
+ return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange);
+}
+static inline void mi_atomic_thread_fence(mi_memory_order mo) {
+ (void)(mo);
+ _Atomic(uintptr_t) x = 0;
+ mi_atomic_exchange_explicit(&x, 1, mo);
+}
+static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
+ (void)(mo);
+#if defined(_M_IX86) || defined(_M_X64)
+ return *p;
+#else
+ uintptr_t x = *p;
+ if (mo > mi_memory_order_relaxed) {
+ while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ };
+ }
+ return x;
+#endif
+}
+static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
+ (void)(mo);
+#if defined(_M_IX86) || defined(_M_X64)
+ *p = x;
+#else
+ mi_atomic_exchange_explicit(p, x, mo);
+#endif
+}
+static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) {
+ (void)(mo);
+#if defined(_M_X64)
+ return *p;
+#else
+ int64_t old = *p;
+ int64_t x = old;
+ while ((old = InterlockedCompareExchange64(p, x, old)) != x) {
+ x = old;
+ }
+ return x;
+#endif
+}
+static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) {
+ (void)(mo);
+#if defined(x_M_IX86) || defined(_M_X64)
+ *p = x;
+#else
+ InterlockedExchange64(p, x);
+#endif
+}
+
+// These are used by the statistics
+static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) {
+#ifdef _WIN64
+ return (int64_t)mi_atomic_addi((int64_t*)p, add);
+#else
+ int64_t current;
+ int64_t sum;
+ do {
+ current = *p;
+ sum = current + add;
+ } while (_InterlockedCompareExchange64(p, sum, current) != current);
+ return current;
+#endif
+}
+static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) {
+ const int64_t add = *padd;
+ if (add != 0) {
+ mi_atomic_addi64_relaxed((volatile _Atomic(int64_t)*)p, add);
+ }
+}
+
+static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
+ int64_t current;
+ do {
+ current = *p;
+ } while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
+}
+
+static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) {
+ mi_atomic_addi64_relaxed(p, i);
+}
+
+static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) {
+ int64_t read = _InterlockedCompareExchange64(p, des, *exp);
+ if (read == *exp) {
+ return true;
+ }
+ else {
+ *exp = read;
+ return false;
+ }
+}
+
+// The pointer macros cast to `uintptr_t`.
+#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
+#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
+#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
+#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
+#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
+#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
+#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
+#define mi_atomic_exchange_ptr_relaxed(tp,p,x) (tp*)mi_atomic_exchange_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
+#define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
+#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
+
+#define mi_atomic_loadi64_acquire(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire))
+#define mi_atomic_loadi64_relaxed(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed))
+#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release))
+#define mi_atomic_storei64_relaxed(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed))
+
+
+#endif
+
+
+// Atomically add a signed value; returns the previous value.
+static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) {
+ return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add);
+}
+
+// Atomically subtract a signed value; returns the previous value.
+static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
+ return (intptr_t)mi_atomic_addi(p, -sub);
+}
+
+
+// ----------------------------------------------------------------------
+// Once and Guard
+// ----------------------------------------------------------------------
+
+typedef _Atomic(uintptr_t) mi_atomic_once_t;
+
+// Returns true only on the first invocation
+static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
+ if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
+ uintptr_t expected = 0;
+ return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
+}
+
+typedef _Atomic(uintptr_t) mi_atomic_guard_t;
+
+// Allows only one thread to execute at a time
+#define mi_atomic_guard(guard) \
+ uintptr_t _mi_guard_expected = 0; \
+ for(bool _mi_guard_once = true; \
+ _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
+ (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
+
+
+
+// ----------------------------------------------------------------------
+// Yield
+// ----------------------------------------------------------------------
+
+#if defined(__cplusplus)
+#include <thread>
+static inline void mi_atomic_yield(void) {
+ std::this_thread::yield();
+}
+#elif defined(_WIN32)
+static inline void mi_atomic_yield(void) {
+ YieldProcessor();
+}
+#elif defined(__SSE2__)
+#include <emmintrin.h>
+static inline void mi_atomic_yield(void) {
+ _mm_pause();
+}
+#elif (defined(__GNUC__) || defined(__clang__)) && \
+ (defined(__x86_64__) || defined(__i386__) || \
+ defined(__aarch64__) || defined(__arm__) || \
+ defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__))
+#if defined(__x86_64__) || defined(__i386__)
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile ("pause" ::: "memory");
+}
+#elif defined(__aarch64__)
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile("wfe");
+}
+#elif defined(__arm__)
+#if __ARM_ARCH >= 7
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile("yield" ::: "memory");
+}
+#else
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile ("nop" ::: "memory");
+}
+#endif
+#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
+#ifdef __APPLE__
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile ("or r27,r27,r27" ::: "memory");
+}
+#else
+static inline void mi_atomic_yield(void) {
+ __asm__ __volatile__ ("or 27,27,27" ::: "memory");
+}
+#endif
+#endif
+#elif defined(__sun)
+// Fallback for other archs
+#include <synch.h>
+static inline void mi_atomic_yield(void) {
+ smt_pause();
+}
+#elif defined(__wasi__)
+#include <sched.h>
+static inline void mi_atomic_yield(void) {
+ sched_yield();
+}
+#else
+#include <unistd.h>
+static inline void mi_atomic_yield(void) {
+ sleep(0);
+}
+#endif
+
+
+// ----------------------------------------------------------------------
+// Locks
+// These do not have to be recursive and should be light-weight
+// in-process only locks. Only used for reserving arena's and to
+// maintain the abandoned list.
+// ----------------------------------------------------------------------
+#if _MSC_VER
+#pragma warning(disable:26110) // unlock with holding lock
+#endif
+
+#define mi_lock(lock) for(bool _go = (mi_lock_acquire(lock),true); _go; (mi_lock_release(lock), _go=false) )
+
+#if defined(_WIN32)
+
+#if 1
+#define mi_lock_t SRWLOCK // slim reader-writer lock
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+ return TryAcquireSRWLockExclusive(lock);
+}
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+ AcquireSRWLockExclusive(lock);
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+ ReleaseSRWLockExclusive(lock);
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+ InitializeSRWLock(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+ (void)(lock);
+}
+
+#else
+#define mi_lock_t CRITICAL_SECTION
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+ return TryEnterCriticalSection(lock);
+}
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+ EnterCriticalSection(lock);
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+ LeaveCriticalSection(lock);
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+ InitializeCriticalSection(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+ DeleteCriticalSection(lock);
+}
+
+#endif
+
+#elif defined(MI_USE_PTHREADS)
+
+void _mi_error_message(int err, const char* fmt, ...);
+
+#define mi_lock_t pthread_mutex_t
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+ return (pthread_mutex_trylock(lock) == 0);
+}
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+ const int err = pthread_mutex_lock(lock);
+ if (err != 0) {
+ _mi_error_message(err, "internal error: lock cannot be acquired\n");
+ }
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+ pthread_mutex_unlock(lock);
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+ pthread_mutex_init(lock, NULL);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+ pthread_mutex_destroy(lock);
+}
+
+#elif defined(__cplusplus)
+
+#include <mutex>
+#define mi_lock_t std::mutex
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+ return lock->try_lock();
+}
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+ lock->lock();
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+ lock->unlock();
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+ (void)(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+ (void)(lock);
+}
+
+#else
+
+// fall back to poor man's locks.
+// this should only be the case in a single-threaded environment (like __wasi__)
+
+#define mi_lock_t _Atomic(uintptr_t)
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+ uintptr_t expected = 0;
+ return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
+}
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+ for (int i = 0; i < 1000; i++) { // for at most 1000 tries?
+ if (mi_lock_try_acquire(lock)) return;
+ mi_atomic_yield();
+ }
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+ mi_atomic_store_release(lock, (uintptr_t)0);
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+ mi_lock_release(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+ (void)(lock);
+}
+
+#endif
+
+
+#endif // __MIMALLOC_ATOMIC_H
diff --git a/compat/mimalloc/mimalloc/internal.h b/compat/mimalloc/mimalloc/internal.h
new file mode 100644
index 00000000000000..e78d0fc06f2d14
--- /dev/null
+++ b/compat/mimalloc/mimalloc/internal.h
@@ -0,0 +1,1153 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_INTERNAL_H
+#define MIMALLOC_INTERNAL_H
+
+// --------------------------------------------------------------------------
+// This file contains the internal API's of mimalloc and various utility
+// functions and macros.
+// --------------------------------------------------------------------------
+
+#include "types.h"
+#include "track.h"
+
+
+// --------------------------------------------------------------------------
+// Compiler defines
+// --------------------------------------------------------------------------
+
+#if (MI_DEBUG>0)
+#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
+#else
+#define mi_trace_message(...)
+#endif
+
+#define mi_decl_cache_align mi_decl_align(64)
+
+#if defined(_MSC_VER)
+#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
+#pragma warning(disable:26812) // unscoped enum warning
+#define mi_decl_noinline __declspec(noinline)
+#define mi_decl_thread __declspec(thread)
+#define mi_decl_align(a) __declspec(align(a))
+#define mi_decl_noreturn __declspec(noreturn)
+#define mi_decl_weak
+#define mi_decl_hidden
+#define mi_decl_cold
+#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
+#define mi_decl_noinline __attribute__((noinline))
+#define mi_decl_thread __thread
+#define mi_decl_align(a) __attribute__((aligned(a)))
+#define mi_decl_noreturn __attribute__((noreturn))
+#define mi_decl_weak __attribute__((weak))
+#define mi_decl_hidden __attribute__((visibility("hidden")))
+#if (__GNUC__ >= 4) || defined(__clang__)
+#define mi_decl_cold __attribute__((cold))
+#else
+#define mi_decl_cold
+#endif
+#elif __cplusplus >= 201103L // c++11
+#define mi_decl_noinline
+#define mi_decl_thread thread_local
+#define mi_decl_align(a) alignas(a)
+#define mi_decl_noreturn [[noreturn]]
+#define mi_decl_weak
+#define mi_decl_hidden
+#define mi_decl_cold
+#else
+#define mi_decl_noinline
+#define mi_decl_thread __thread // hope for the best :-)
+#define mi_decl_align(a)
+#define mi_decl_noreturn
+#define mi_decl_weak
+#define mi_decl_hidden
+#define mi_decl_cold
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define mi_unlikely(x) (__builtin_expect(!!(x),false))
+#define mi_likely(x) (__builtin_expect(!!(x),true))
+#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+#define mi_unlikely(x) (x) [[unlikely]]
+#define mi_likely(x) (x) [[likely]]
+#else
+#define mi_unlikely(x) (x)
+#define mi_likely(x) (x)
+#endif
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if defined(__cplusplus)
+#define mi_decl_externc extern "C"
+#else
+#define mi_decl_externc
+#endif
+
+#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
+#define __wasi__
+#endif
+
+
+// --------------------------------------------------------------------------
+// Internal functions
+// --------------------------------------------------------------------------
+
+// "libc.c"
+#include <stdarg.h>
+int _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
+int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
+char _mi_toupper(char c);
+int _mi_strnicmp(const char* s, const char* t, size_t n);
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
+void _mi_strlcat(char* dest, const char* src, size_t dest_size);
+size_t _mi_strlen(const char* s);
+size_t _mi_strnlen(const char* s, size_t max_len);
+bool _mi_getenv(const char* name, char* result, size_t result_size);
+
+// "options.c"
+void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
+void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
+void _mi_message(const char* fmt, ...);
+void _mi_warning_message(const char* fmt, ...);
+void _mi_verbose_message(const char* fmt, ...);
+void _mi_trace_message(const char* fmt, ...);
+void _mi_options_init(void);
+long _mi_option_get_fast(mi_option_t option);
+void _mi_error_message(int err, const char* fmt, ...);
+
+// random.c
+void _mi_random_init(mi_random_ctx_t* ctx);
+void _mi_random_init_weak(mi_random_ctx_t* ctx);
+void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
+void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
+uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
+uintptr_t _mi_heap_random_next(mi_heap_t* heap);
+uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
+static inline uintptr_t _mi_random_shuffle(uintptr_t x);
+
+// init.c
+extern mi_decl_hidden mi_decl_cache_align mi_stats_t _mi_stats_main;
+extern mi_decl_hidden mi_decl_cache_align const mi_page_t _mi_page_empty;
+void _mi_auto_process_init(void);
+void mi_cdecl _mi_auto_process_done(void) mi_attr_noexcept;
+bool _mi_is_redirected(void);
+bool _mi_allocator_init(const char** message);
+void _mi_allocator_done(void);
+bool _mi_is_main_thread(void);
+size_t _mi_current_thread_count(void);
+bool _mi_preloading(void); // true while the C runtime is not initialized yet
+void _mi_thread_done(mi_heap_t* heap);
+void _mi_thread_data_collect(void);
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
+mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
+mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
+mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
+void _mi_heap_guarded_init(mi_heap_t* heap);
+
+// os.c
+void _mi_os_init(void); // called from process init
+void* _mi_os_alloc(size_t size, mi_memid_t* memid);
+void* _mi_os_zalloc(size_t size, mi_memid_t* memid);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
+
+size_t _mi_os_page_size(void);
+size_t _mi_os_good_alloc_size(size_t size);
+bool _mi_os_has_overcommit(void);
+bool _mi_os_has_virtual_reserve(void);
+
+bool _mi_os_reset(void* addr, size_t size);
+bool _mi_os_decommit(void* addr, size_t size);
+bool _mi_os_unprotect(void* addr, size_t size);
+bool _mi_os_purge(void* p, size_t size);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size);
+void _mi_os_reuse(void* p, size_t size);
+mi_decl_nodiscard bool _mi_os_commit(void* p, size_t size, bool* is_zero);
+mi_decl_nodiscard bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
+bool _mi_os_protect(void* addr, size_t size);
+
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);
+
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
+bool _mi_os_canuse_large_page(size_t size, size_t alignment);
+size_t _mi_os_large_page_size(void);
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
+
+int _mi_os_numa_node_count(void);
+int _mi_os_numa_node(void);
+
+// arena.c
+mi_arena_id_t _mi_arena_id_none(void);
+void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
+bool _mi_arena_contains(const void* p);
+void _mi_arenas_collect(bool force_purge);
+void _mi_arena_unsafe_destroy_all(void);
+
+bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment);
+void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
+
+void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
+void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);
+
+typedef struct mi_arena_field_cursor_s { // abstract struct
+ size_t os_list_count; // max entries to visit in the OS abandoned list
+ size_t start; // start arena idx (may need to be wrapped)
+ size_t end; // end arena idx (exclusive, may need to be wrapped)
+ size_t bitmap_idx; // current bit idx for an arena
+ mi_subproc_t* subproc; // only visit blocks in this sub-process
+ bool visit_all; // ensure all abandoned blocks are seen (blocking)
+ bool hold_visit_lock; // if the subproc->abandoned_os_visit_lock is held
+} mi_arena_field_cursor_t;
+void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, bool visit_all, mi_arena_field_cursor_t* current);
+mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous);
+void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current);
+
+// "segment-map.c"
+void _mi_segment_map_allocated_at(const mi_segment_t* segment);
+void _mi_segment_map_freed_at(const mi_segment_t* segment);
+void _mi_segment_map_unsafe_destroy(void);
+
+// "segment.c"
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
+void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
+void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
+bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
+void _mi_segment_collect(mi_segment_t* segment, bool force);
+
+#if MI_HUGE_PAGE_ABANDON
+void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+#else
+void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+#endif
+
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
+void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
+void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
+bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
+bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
+
+// "page.c"
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment, size_t* usable) mi_attr_noexcept mi_attr_malloc;
+
+void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
+void _mi_page_unfull(mi_page_t* page);
+void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
+void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
+void _mi_page_force_abandon(mi_page_t* page);
+
+void _mi_heap_delayed_free_all(mi_heap_t* heap);
+bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
+void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
+
+void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
+void _mi_deferred_free(mi_heap_t* heap, bool force);
+
+void _mi_page_free_collect(mi_page_t* page,bool force);
+void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
+
+size_t _mi_page_stats_bin(const mi_page_t* page); // for stats
+size_t _mi_bin_size(size_t bin); // for stats
+size_t _mi_bin(size_t size); // for stats
+
+// "heap.c"
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
+void _mi_heap_destroy_pages(mi_heap_t* heap);
+void _mi_heap_collect_abandon(mi_heap_t* heap);
+void _mi_heap_set_default_direct(mi_heap_t* heap);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
+void _mi_heap_unsafe_destroy_all(mi_heap_t* heap);
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
+void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
+bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
+
+// "stats.c"
+void _mi_stats_done(mi_stats_t* stats);
+void _mi_stats_merge_thread(mi_tld_t* tld);
+mi_msecs_t _mi_clock_now(void);
+mi_msecs_t _mi_clock_end(mi_msecs_t start);
+mi_msecs_t _mi_clock_start(void);
+
+// "alloc.c"
+void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero, size_t* usable) mi_attr_noexcept; // called from `_mi_malloc_generic`
+void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
+void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
+void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment, size_t* usable) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero, size_t* usable_pre, size_t* usable_post) mi_attr_noexcept;
+mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p);
+bool _mi_free_delayed_block(mi_block_t* block);
+void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
+
+#if MI_DEBUG>1
+bool _mi_page_is_valid(mi_page_t* page);
+#endif
+
+
+/* -----------------------------------------------------------
+ Error codes passed to `_mi_fatal_error`
+ All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
+ For portability define undefined error codes using common Unix codes:
+
+----------------------------------------------------------- */
+#include <errno.h>
+#ifndef EAGAIN // double free
+#define EAGAIN (11)
+#endif
+#ifndef ENOMEM // out of memory
+#define ENOMEM (12)
+#endif
+#ifndef EFAULT // corrupted free-list or meta-data
+#define EFAULT (14)
+#endif
+#ifndef EINVAL // trying to free an invalid pointer
+#define EINVAL (22)
+#endif
+#ifndef EOVERFLOW // count*size overflow
+#define EOVERFLOW (75)
+#endif
+
+
+// ------------------------------------------------------
+// Assertions
+// ------------------------------------------------------
+
+#if (MI_DEBUG)
+// use our own assertion to print without memory allocation
+mi_decl_noreturn mi_decl_cold void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func) mi_attr_noexcept;
+#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
+#else
+#define mi_assert(x)
+#endif
+
+#if (MI_DEBUG>1)
+#define mi_assert_internal mi_assert
+#else
+#define mi_assert_internal(x)
+#endif
+
+#if (MI_DEBUG>2)
+#define mi_assert_expensive mi_assert
+#else
+#define mi_assert_expensive(x)
+#endif
+
+
+
+/* -----------------------------------------------------------
+ Inlined definitions
+----------------------------------------------------------- */
+#define MI_UNUSED(x) (void)(x)
+#if (MI_DEBUG>0)
+#define MI_UNUSED_RELEASE(x)
+#else
+#define MI_UNUSED_RELEASE(x) MI_UNUSED(x)
+#endif
+
+#define MI_INIT4(x) x(),x(),x(),x()
+#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x)
+#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x)
+#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x)
+#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x)
+#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
+#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)
+#define MI_INIT74(x) MI_INIT64(x),MI_INIT8(x),x(),x()
+
+#include <string.h>
+// initialize a local variable to zero; use memset as compilers optimize constant sized memset's
+#define _mi_memzero_var(x) memset(&x,0,sizeof(x))
+
+// Is `x` a power of two? (0 is considered a power of two)
+static inline bool _mi_is_power_of_two(uintptr_t x) {
+ return ((x & (x - 1)) == 0);
+}
+
+// Is a pointer aligned?
+static inline bool _mi_is_aligned(void* p, size_t alignment) {
+ mi_assert_internal(alignment != 0);
+ return (((uintptr_t)p % alignment) == 0);
+}
+
+// Align upwards
+static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
+ mi_assert_internal(alignment != 0);
+ uintptr_t mask = alignment - 1;
+ if ((alignment & mask) == 0) { // power of two?
+ return ((sz + mask) & ~mask);
+ }
+ else {
+ return (((sz + mask)/alignment)*alignment);
+ }
+}
+
+// Align downwards
+static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
+ mi_assert_internal(alignment != 0);
+ uintptr_t mask = alignment - 1;
+ if ((alignment & mask) == 0) { // power of two?
+ return (sz & ~mask);
+ }
+ else {
+ return ((sz / alignment) * alignment);
+ }
+}
+
+// Align a pointer upwards
+static inline void* mi_align_up_ptr(void* p, size_t alignment) {
+ return (void*)_mi_align_up((uintptr_t)p, alignment);
+}
+
+// Align a pointer downwards
+static inline void* mi_align_down_ptr(void* p, size_t alignment) {
+ return (void*)_mi_align_down((uintptr_t)p, alignment);
+}
+
+
+// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
+static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
+ mi_assert_internal(divider != 0);
+ return (divider == 0 ? size : ((size + divider - 1) / divider));
+}
+
+
+// clamp an integer
+static inline size_t _mi_clamp(size_t sz, size_t min, size_t max) {
+ if (sz < min) return min;
+ else if (sz > max) return max;
+ else return sz;
+}
+
+// Is memory zero initialized?
+static inline bool mi_mem_is_zero(const void* p, size_t size) {
+ for (size_t i = 0; i < size; i++) {
+ if (((uint8_t*)p)[i] != 0) return false;
+ }
+ return true;
+}
+
+
+// Align a byte size to a size in _machine words_,
+// i.e. byte size == `wsize*sizeof(void*)`.
+static inline size_t _mi_wsize_from_size(size_t size) {
+ mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t));
+ return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
+}
+
+// Overflow detecting multiply
+#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
+#include <limits.h>  // UINT_MAX, ULONG_MAX
+#if defined(_CLOCK_T) // for Illumos
+#undef _CLOCK_T
+#endif
+static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
+ #if (SIZE_MAX == ULONG_MAX)
+ return __builtin_umull_overflow(count, size, (unsigned long *)total);
+ #elif (SIZE_MAX == UINT_MAX)
+ return __builtin_umul_overflow(count, size, (unsigned int *)total);
+ #else
+ return __builtin_umulll_overflow(count, size, (unsigned long long *)total);
+ #endif
+}
+#else /* __builtin_umul_overflow is unavailable */
+static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
+ #define MI_MUL_COULD_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
+ *total = count * size;
+ // note: gcc/clang optimize this to directly check the overflow flag
+ return ((size >= MI_MUL_COULD_OVERFLOW || count >= MI_MUL_COULD_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
+}
+#endif
+
+// Safe multiply `count*size` into `total`; return `true` on overflow.
+static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
+ if (count==1) { // quick check for the case where count is one (common for C++ allocators)
+ *total = size;
+ return false;
+ }
+ else if mi_unlikely(mi_mul_overflow(count, size, total)) {
+ #if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
+ #endif
+ *total = SIZE_MAX;
+ return true;
+ }
+ else return false;
+}
+
+
+/*----------------------------------------------------------------------------------------
+ Heap functions
+------------------------------------------------------------------------------------------- */
+
+extern mi_decl_hidden const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
+
+static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
+ return (heap->tld->heap_backing == heap);
+}
+
+static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
+ mi_assert_internal(heap != NULL);
+ return (heap != NULL && heap != &_mi_heap_empty);
+}
+
+static inline uintptr_t _mi_ptr_cookie(const void* p) {
+ extern mi_decl_hidden mi_heap_t _mi_heap_main;
+ mi_assert_internal(_mi_heap_main.cookie != 0);
+ return ((uintptr_t)p ^ _mi_heap_main.cookie);
+}
+
+/* -----------------------------------------------------------
+ Pages
+----------------------------------------------------------- */
+
+static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
+ mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
+ const size_t idx = _mi_wsize_from_size(size);
+ mi_assert_internal(idx < MI_PAGES_DIRECT);
+ return heap->pages_free_direct[idx];
+}
+
+// Segment that contains the pointer
+// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
+// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
+// therefore we align one byte before `p`.
+// We check for NULL afterwards on 64-bit systems to improve codegen for `mi_free`.
+static inline mi_segment_t* _mi_ptr_segment(const void* p) {
+ mi_segment_t* const segment = (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
+ #if MI_INTPTR_SIZE <= 4
+ return (p==NULL ? NULL : segment);
+ #else
+ return ((intptr_t)segment <= 0 ? NULL : segment);
+ #endif
+}
+
+static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
+ mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0);
+ return (mi_page_t*)(s);
+}
+
+static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
+ mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0);
+ return (mi_slice_t*)(p);
+}
+
+// Segment belonging to a page
+static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
+ mi_assert_internal(page!=NULL);
+ mi_segment_t* segment = _mi_ptr_segment(page);
+ mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
+ return segment;
+}
+
+static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
+ mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
+ mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
+ mi_assert_internal(start->slice_offset == 0);
+ mi_assert_internal(start + start->slice_count > slice);
+ return start;
+}
+
+// Get the page containing the pointer (performance critical as it is called in mi_free)
+static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
+ mi_assert_internal(p > (void*)segment);
+ ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
+ mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
+ size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
+ mi_assert_internal(idx <= segment->slice_entries);
+ mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
+ mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
+ mi_assert_internal(slice->slice_offset == 0);
+ mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
+ return mi_slice_to_page(slice);
+}
+
+// Quick page start for initialized pages
+static inline uint8_t* mi_page_start(const mi_page_t* page) {
+ mi_assert_internal(page->page_start != NULL);
+ mi_assert_expensive(_mi_segment_page_start(_mi_page_segment(page),page,NULL) == page->page_start);
+ return page->page_start;
+}
+
+// Get the page containing the pointer
+static inline mi_page_t* _mi_ptr_page(void* p) {
+ mi_assert_internal(p!=NULL);
+ return _mi_segment_page_of(_mi_ptr_segment(p), p);
+}
+
+// Get the block size of a page (special case for huge objects)
+static inline size_t mi_page_block_size(const mi_page_t* page) {
+ mi_assert_internal(page->block_size > 0);
+ return page->block_size;
+}
+
+static inline bool mi_page_is_huge(const mi_page_t* page) {
+ mi_assert_internal((page->is_huge && _mi_page_segment(page)->kind == MI_SEGMENT_HUGE) ||
+ (!page->is_huge && _mi_page_segment(page)->kind != MI_SEGMENT_HUGE));
+ return page->is_huge;
+}
+
+// Get the usable block size of a page without fixed padding.
+// This may still include internal padding due to alignment and rounding up size classes.
+static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
+ return mi_page_block_size(page) - MI_PADDING_SIZE;
+}
+
+// size of a segment
+static inline size_t mi_segment_size(mi_segment_t* segment) {
+ return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
+}
+
+static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
+ return (uint8_t*)segment + mi_segment_size(segment);
+}
+
+// Thread free access
+static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
+ return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3);
+}
+
+static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
+ return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3);
+}
+
+// Heap access
+static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
+ return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap));
+}
+
+static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
+ mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
+ mi_atomic_store_release(&page->xheap,(uintptr_t)heap);
+ if (heap != NULL) { page->heap_tag = heap->tag; }
+}
+
+// Thread free flag helpers
+static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
+ return (mi_block_t*)(tf & ~0x03);
+}
+static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
+ return (mi_delayed_t)(tf & 0x03);
+}
+static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
+ return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
+}
+static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
+ return mi_tf_make(mi_tf_block(tf),delayed);
+}
+static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
+ return mi_tf_make(block, mi_tf_delayed(tf));
+}
+
+// are all blocks in a page freed?
+// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`.
+static inline bool mi_page_all_free(const mi_page_t* page) {
+ mi_assert_internal(page != NULL);
+ return (page->used == 0);
+}
+
+// are there any available blocks?
+static inline bool mi_page_has_any_available(const mi_page_t* page) {
+ mi_assert_internal(page != NULL && page->reserved > 0);
+ return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
+}
+
+// are there immediately available blocks, i.e. blocks available on the free list.
+static inline bool mi_page_immediate_available(const mi_page_t* page) {
+ mi_assert_internal(page != NULL);
+ return (page->free != NULL);
+}
+
+// is more than 7/8th of a page in use?
+static inline bool mi_page_is_mostly_used(const mi_page_t* page) {
+ if (page==NULL) return true;
+ uint16_t frac = page->reserved / 8U;
+ return (page->reserved - page->used <= frac);
+}
+
+static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
+ return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
+}
+
+
+
+//-----------------------------------------------------------
+// Page flags
+//-----------------------------------------------------------
+static inline bool mi_page_is_in_full(const mi_page_t* page) {
+ return page->flags.x.in_full;
+}
+
+static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
+ page->flags.x.in_full = in_full;
+}
+
+static inline bool mi_page_has_aligned(const mi_page_t* page) {
+ return page->flags.x.has_aligned;
+}
+
+static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
+ page->flags.x.has_aligned = has_aligned;
+}
+
+/* -------------------------------------------------------------------
+ Guarded objects
+------------------------------------------------------------------- */
+#if MI_GUARDED
+static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void* p) {
+ const ptrdiff_t offset = (uint8_t*)p - (uint8_t*)block;
+ return (offset >= (ptrdiff_t)(sizeof(mi_block_t)) && block->next == MI_BLOCK_TAG_GUARDED);
+}
+
+static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
+ // this code is written to result in fast assembly as it is on the hot path for allocation
+ const size_t count = heap->guarded_sample_count - 1; // if the rate was 0, this will underflow and count for a long time..
+ if mi_likely(count != 0) {
+ // no sample
+ heap->guarded_sample_count = count;
+ return false;
+ }
+ else if (size >= heap->guarded_size_min && size <= heap->guarded_size_max) {
+ // use guarded allocation
+ heap->guarded_sample_count = heap->guarded_sample_rate; // reset
+ return (heap->guarded_sample_rate != 0);
+ }
+ else {
+ // failed size criteria, rewind count (but don't write to an empty heap)
+ if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
+ return false;
+ }
+}
+
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+
+#endif
+
+
+/* -------------------------------------------------------------------
+Encoding/Decoding the free list next pointers
+
+This is to protect against buffer overflow exploits where the
+free list is mutated. Many hardened allocators xor the next pointer `p`
+with a secret key `k1`, as `p^k1`. This prevents overwriting with known
+values but might be still too weak: if the attacker can guess
+the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
+Moreover, if multiple blocks can be read as well, the attacker can
+xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
+about the pointers (and subsequently `k1`).
+
+Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
+Since these operations are not associative, the above approaches do not
+work so well any more even if the `p` can be guessed.
+------------------------------------------------------------------- */
+
+static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
+  shift %= MI_INTPTR_BITS;
+  return (shift==0 ? x : ((x << shift) | (x >> (MI_INTPTR_BITS - shift))));
+}
+static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
+ shift %= MI_INTPTR_BITS;
+ return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift))));
+}
+
+static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
+ void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
+ return (p==null ? NULL : p);
+}
+
+static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
+ uintptr_t x = (uintptr_t)(p==NULL ? null : p);
+ return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
+}
+
+static inline uint32_t mi_ptr_encode_canary(const void* null, const void* p, const uintptr_t* keys) {
+ const uint32_t x = (uint32_t)(mi_ptr_encode(null,p,keys));
+ // make the lowest byte 0 to prevent spurious read overflows which could be a security issue (issue #951)
+ #ifdef MI_BIG_ENDIAN
+ return (x & 0x00FFFFFF);
+ #else
+ return (x & 0xFFFFFF00);
+ #endif
+}
+
+static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
+ mi_track_mem_defined(block,sizeof(mi_block_t));
+ mi_block_t* next;
+ #ifdef MI_ENCODE_FREELIST
+ next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
+ #else
+ MI_UNUSED(keys); MI_UNUSED(null);
+ next = (mi_block_t*)block->next;
+ #endif
+ mi_track_mem_noaccess(block,sizeof(mi_block_t));
+ return next;
+}
+
+static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
+ mi_track_mem_undefined(block,sizeof(mi_block_t));
+ #ifdef MI_ENCODE_FREELIST
+ block->next = mi_ptr_encode(null, next, keys);
+ #else
+ MI_UNUSED(keys); MI_UNUSED(null);
+ block->next = (mi_encoded_t)next;
+ #endif
+ mi_track_mem_noaccess(block,sizeof(mi_block_t));
+}
+
+static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
+ #ifdef MI_ENCODE_FREELIST
+ mi_block_t* next = mi_block_nextx(page,block,page->keys);
+ // check for free list corruption: is `next` at least in the same page?
+ // TODO: check if `next` is `page->block_size` aligned?
+ if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
+ _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
+ next = NULL;
+ }
+ return next;
+ #else
+ MI_UNUSED(page);
+ return mi_block_nextx(page,block,NULL);
+ #endif
+}
+
+static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
+ #ifdef MI_ENCODE_FREELIST
+ mi_block_set_nextx(page,block,next, page->keys);
+ #else
+ MI_UNUSED(page);
+ mi_block_set_nextx(page,block,next,NULL);
+ #endif
+}
+
+
+// -------------------------------------------------------------------
+// commit mask
+// -------------------------------------------------------------------
+
+static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ cm->mask[i] = 0;
+ }
+}
+
+static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ cm->mask[i] = ~((size_t)0);
+ }
+}
+
+static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if (cm->mask[i] != 0) return false;
+ }
+ return true;
+}
+
+static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if (cm->mask[i] != ~((size_t)0)) return false;
+ }
+ return true;
+}
+
+// defined in `segment.c`:
+size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
+size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
+
+#define mi_commit_mask_foreach(cm,idx,count) \
+ idx = 0; \
+ while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
+
+#define mi_commit_mask_foreach_end() \
+ idx += count; \
+ }
+
+
+
+/* -----------------------------------------------------------
+ memory id's
+----------------------------------------------------------- */
+
+static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) {
+ mi_memid_t memid;
+ _mi_memzero_var(memid);
+ memid.memkind = memkind;
+ return memid;
+}
+
+static inline mi_memid_t _mi_memid_none(void) {
+ return _mi_memid_create(MI_MEM_NONE);
+}
+
+static inline mi_memid_t _mi_memid_create_os(void* base, size_t size, bool committed, bool is_zero, bool is_large) {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_OS);
+ memid.mem.os.base = base;
+ memid.mem.os.size = size;
+ memid.initially_committed = committed;
+ memid.initially_zero = is_zero;
+ memid.is_pinned = is_large;
+ return memid;
+}
+
+
+// -------------------------------------------------------------------
+// Fast "random" shuffle
+// -------------------------------------------------------------------
+
+static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
+ if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros
+#if (MI_INTPTR_SIZE>=8)
+ // by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
+ x ^= x >> 30;
+ x *= 0xbf58476d1ce4e5b9UL;
+ x ^= x >> 27;
+ x *= 0x94d049bb133111ebUL;
+ x ^= x >> 31;
+#elif (MI_INTPTR_SIZE==4)
+ // by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
+ x ^= x >> 16;
+ x *= 0x7feb352dUL;
+ x ^= x >> 15;
+ x *= 0x846ca68bUL;
+ x ^= x >> 16;
+#endif
+ return x;
+}
+
+
+
+// -----------------------------------------------------------------------
+// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
+// -----------------------------------------------------------------------
+
+#if defined(__GNUC__)
+
+#include <limits.h> // LONG_MAX
+#define MI_HAVE_FAST_BITSCAN
+static inline size_t mi_clz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (SIZE_MAX == ULONG_MAX)
+ return __builtin_clzl(x);
+ #else
+ return __builtin_clzll(x);
+ #endif
+}
+static inline size_t mi_ctz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (SIZE_MAX == ULONG_MAX)
+ return __builtin_ctzl(x);
+ #else
+ return __builtin_ctzll(x);
+ #endif
+}
+
+#elif defined(_MSC_VER)
+
+#include <limits.h> // LONG_MAX
+#include <intrin.h> // BitScanReverse64
+#define MI_HAVE_FAST_BITSCAN
+static inline size_t mi_clz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ unsigned long idx;
+ #if (SIZE_MAX == ULONG_MAX)
+ _BitScanReverse(&idx, x);
+ #else
+ _BitScanReverse64(&idx, x);
+ #endif
+ return ((MI_SIZE_BITS - 1) - (size_t)idx);
+}
+static inline size_t mi_ctz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ unsigned long idx;
+ #if (SIZE_MAX == ULONG_MAX)
+ _BitScanForward(&idx, x);
+ #else
+ _BitScanForward64(&idx, x);
+ #endif
+ return (size_t)idx;
+}
+
+#else
+
+static inline size_t mi_ctz_generic32(uint32_t x) {
+ // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
+ static const uint8_t debruijn[32] = {
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
+ };
+ if (x==0) return 32;
+ return debruijn[(uint32_t)((x & -(int32_t)x) * (uint32_t)(0x077CB531U)) >> 27];
+}
+
+static inline size_t mi_clz_generic32(uint32_t x) {
+ // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
+ static const uint8_t debruijn[32] = {
+ 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
+ 23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0
+ };
+ if (x==0) return 32;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ return debruijn[(uint32_t)(x * (uint32_t)(0x07C4ACDDU)) >> 27];
+}
+
+static inline size_t mi_ctz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (MI_SIZE_BITS <= 32)
+ return mi_ctz_generic32((uint32_t)x);
+ #else
+ const uint32_t lo = (uint32_t)x;
+ if (lo != 0) {
+ return mi_ctz_generic32(lo);
+ }
+ else {
+ return (32 + mi_ctz_generic32((uint32_t)(x>>32)));
+ }
+ #endif
+}
+
+static inline size_t mi_clz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (MI_SIZE_BITS <= 32)
+ return mi_clz_generic32((uint32_t)x);
+ #else
+ const uint32_t hi = (uint32_t)(x>>32);
+ if (hi != 0) {
+ return mi_clz_generic32(hi);
+ }
+ else {
+ return 32 + mi_clz_generic32((uint32_t)x);
+ }
+ #endif
+}
+
+#endif
+
+// "bit scan reverse": Return index of the highest bit (or MI_SIZE_BITS if `x` is zero)
+static inline size_t mi_bsr(size_t x) {
+ return (x==0 ? MI_SIZE_BITS : MI_SIZE_BITS - 1 - mi_clz(x));
+}
+
+size_t _mi_popcount_generic(size_t x);
+
+static inline size_t mi_popcount(size_t x) {
+ if (x<=1) return x;
+ if (x==SIZE_MAX) return MI_SIZE_BITS;
+ #if defined(__GNUC__)
+ #if (SIZE_MAX == ULONG_MAX)
+ return __builtin_popcountl(x);
+ #else
+ return __builtin_popcountll(x);
+ #endif
+ #else
+ return _mi_popcount_generic(x);
+ #endif
+}
+
+// ---------------------------------------------------------------------------------
+// Provide our own `_mi_memcpy` for potential performance optimizations.
+//
+// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if
+// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support
+// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253.
+// ---------------------------------------------------------------------------------
+
+#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
+#include <intrin.h>
+extern mi_decl_hidden bool _mi_cpu_has_fsrm;
+extern mi_decl_hidden bool _mi_cpu_has_erms;
+static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
+ if (_mi_cpu_has_fsrm && n <= 127) { // || (_mi_cpu_has_erms && n > 128)) {
+ __movsb((unsigned char*)dst, (const unsigned char*)src, n);
+ }
+ else {
+ memcpy(dst, src, n);
+ }
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+ if (_mi_cpu_has_fsrm && n <= 127) { // || (_mi_cpu_has_erms && n > 128)) {
+ __stosb((unsigned char*)dst, 0, n);
+ }
+ else {
+ memset(dst, 0, n);
+ }
+}
+#else
+static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
+ memcpy(dst, src, n);
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+ memset(dst, 0, n);
+}
+#endif
+
+// -------------------------------------------------------------------------------
+// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
+// This is used for example in `mi_realloc`.
+// -------------------------------------------------------------------------------
+
+#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
+// On GCC/CLang we provide a hint that the pointers are word aligned.
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
+ mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
+ void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+ const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
+ _mi_memcpy(adst, asrc, n);
+}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+ _mi_memzero(adst, n);
+}
+#else
+// Default fallback on `_mi_memcpy`
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
+ mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
+ _mi_memcpy(dst, src, n);
+}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ _mi_memzero(dst, n);
+}
+#endif
+
+
+#endif
diff --git a/compat/mimalloc/mimalloc/prim.h b/compat/mimalloc/mimalloc/prim.h
new file mode 100644
index 00000000000000..f8abc8c48cea32
--- /dev/null
+++ b/compat/mimalloc/mimalloc/prim.h
@@ -0,0 +1,421 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_PRIM_H
+#define MIMALLOC_PRIM_H
+#include "internal.h" // mi_decl_hidden
+
+// --------------------------------------------------------------------------
+// This file specifies the primitive portability API.
+// Each OS/host needs to implement these primitives, see `src/prim`
+// for implementations on Windows, macOS, WASI, and Linux/Unix.
+//
+// note: on all primitive functions, we always have result parameters != NULL, and:
+// addr != NULL and page aligned
+// size > 0 and page aligned
+// the return value is an error code as an `int` where 0 is success
+// --------------------------------------------------------------------------
+
+// OS memory configuration
+typedef struct mi_os_mem_config_s {
+ size_t page_size; // default to 4KiB
+ size_t large_page_size; // 0 if not supported, usually 2MiB (4MiB on Windows)
+ size_t alloc_granularity; // smallest allocation size (usually 4KiB, on Windows 64KiB)
+ size_t physical_memory_in_kib; // physical memory size in KiB
+ size_t virtual_address_bits; // usually 48 or 56 bits on 64-bit systems. (used to determine secure randomization)
+ bool has_overcommit; // can we reserve more memory than can be actually committed?
+ bool has_partial_free; // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc)
+ bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
+} mi_os_mem_config_t;
+
+// Initialize
+void _mi_prim_mem_init( mi_os_mem_config_t* config );
+
+// Free OS memory
+int _mi_prim_free(void* addr, size_t size );
+
+// Allocate OS memory. Return NULL on error.
+// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
+// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
+// which will later be committed explicitly using `_mi_prim_commit`.
+// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
+// The `hint_addr` address is either `NULL` or a preferred allocation address but can be ignored.
+// pre: !commit => !allow_large
+// try_alignment >= _mi_os_page_size() and a power of 2
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
+
+// Commit memory. Returns error code or 0 on success.
+// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
+// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows)
+int _mi_prim_commit(void* addr, size_t size, bool* is_zero);
+
+// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true
+// if the memory would need to be re-committed. For example, on Windows this is always true,
+// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit.
+// pre: needs_recommit != NULL
+int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
+
+// Reset memory. The range keeps being accessible but the content might be reset to zero at any moment.
+// Returns error code or 0 on success.
+int _mi_prim_reset(void* addr, size_t size);
+
+// Reuse memory. This is called for memory that is already committed but
+// may have been reset (`_mi_prim_reset`) or decommitted (`_mi_prim_decommit`) where `needs_recommit` was false.
+// Returns error code or 0 on success. On most platforms this is a no-op.
+int _mi_prim_reuse(void* addr, size_t size);
+
+// Protect memory. Returns error code or 0 on success.
+int _mi_prim_protect(void* addr, size_t size, bool protect);
+
+// Allocate huge (1GiB) pages possibly associated with a NUMA node.
+// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
+// pre: size > 0 and a multiple of 1GiB.
+// numa_node is either negative (don't care), or a numa node number.
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);
+
+// Return the current NUMA node
+size_t _mi_prim_numa_node(void);
+
+// Return the number of logical NUMA nodes
+size_t _mi_prim_numa_node_count(void);
+
+// Clock ticks
+mi_msecs_t _mi_prim_clock_now(void);
+
+// Return process information (only for statistics)
+typedef struct mi_process_info_s {
+ mi_msecs_t elapsed;
+ mi_msecs_t utime;
+ mi_msecs_t stime;
+ size_t current_rss;
+ size_t peak_rss;
+ size_t current_commit;
+ size_t peak_commit;
+ size_t page_faults;
+} mi_process_info_t;
+
+void _mi_prim_process_info(mi_process_info_t* pinfo);
+
+// Default stderr output. (only for warnings etc. with verbose enabled)
+// msg != NULL && _mi_strlen(msg) > 0
+void _mi_prim_out_stderr( const char* msg );
+
+// Get an environment variable. (only for options)
+// name != NULL, result != NULL, result_size >= 64
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size);
+
+
+// Fill a buffer with strong randomness; return `false` on error or if
+// there is no strong randomization available.
+bool _mi_prim_random_buf(void* buf, size_t buf_len);
+
+// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
+void _mi_prim_thread_init_auto_done(void);
+
+// Called on process exit and may take action to clean up resources associated with the thread auto done.
+void _mi_prim_thread_done_auto_done(void);
+
+// Called when the default heap for a thread changes
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
+
+
+//-------------------------------------------------------------------
+// Access to TLS (thread local storage) slots.
+// We need fast access to both a unique thread id (in `free.c:mi_free`) and
+// to a thread-local heap pointer (in `alloc.c:mi_malloc`).
+// To achieve this we use specialized code for various platforms.
+//-------------------------------------------------------------------
+
+// On some libc + platform combinations we can directly access a thread-local storage (TLS) slot.
+// The TLS layout depends on both the OS and libc implementation so we use specific tests for each main platform.
+// If you test on another platform and it works please send a PR :-)
+// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
+//
+// Note: we would like to prefer `__builtin_thread_pointer()` nowadays instead of using assembly,
+// but unfortunately we can not detect support reliably (see issue #883)
+// We also use it on Apple OS as we use a TLS slot for the default heap there.
+#if defined(__GNUC__) && ( \
+ (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
+ || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__) || defined(__POWERPC__))) \
+ || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
+ || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
+ || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
+ )
+
+#define MI_HAS_TLS_SLOT 1
+
+static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
+ void* res;
+ const size_t ofs = (slot*sizeof(void*));
+ #if defined(__i386__)
+ __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS
+ #elif defined(__APPLE__) && defined(__x86_64__)
+ __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
+ #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
+ __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
+ #elif defined(__x86_64__)
+ __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
+ #elif defined(__arm__)
+ void** tcb; MI_UNUSED(ofs);
+ __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
+ res = tcb[slot];
+ #elif defined(__aarch64__)
+ void** tcb; MI_UNUSED(ofs);
+ #if defined(__APPLE__) // M1, issue #343
+ __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
+ #else
+ __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
+ #endif
+ res = tcb[slot];
+ #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781
+ MI_UNUSED(ofs);
+ res = pthread_getspecific(slot);
+ #endif
+ return res;
+}
+
+// setting a tls slot is only used on macOS for now
+static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
+ const size_t ofs = (slot*sizeof(void*));
+ #if defined(__i386__)
+ __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
+ #elif defined(__APPLE__) && defined(__x86_64__)
+ __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS
+ #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
+ __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
+ #elif defined(__x86_64__)
+ __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
+ #elif defined(__arm__)
+ void** tcb; MI_UNUSED(ofs);
+ __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
+ tcb[slot] = value;
+ #elif defined(__aarch64__)
+ void** tcb; MI_UNUSED(ofs);
+ #if defined(__APPLE__) // M1, issue #343
+ __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
+ #else
+ __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
+ #endif
+ tcb[slot] = value;
+ #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781
+ MI_UNUSED(ofs);
+ pthread_setspecific(slot, value);
+ #endif
+}
+
+#elif _WIN32 && MI_WIN_USE_FIXED_TLS && !defined(MI_WIN_USE_FLS)
+
+// On windows we can store the thread-local heap at a fixed TLS slot to avoid
+// thread-local initialization checks in the fast path.
+// We allocate a user TLS slot at process initialization (see `windows/prim.c`)
+// and store the offset `_mi_win_tls_offset`.
+#define MI_HAS_TLS_SLOT 1 // 2 = we can reliably initialize the slot (saving a test on each malloc)
+
+extern mi_decl_hidden size_t _mi_win_tls_offset;
+
+#if MI_WIN_USE_FIXED_TLS > 1
+#define MI_TLS_SLOT (MI_WIN_USE_FIXED_TLS)
+#elif MI_SIZE_SIZE == 4
+#define MI_TLS_SLOT (0x0E10 + _mi_win_tls_offset) // User TLS slots
+#else
+#define MI_TLS_SLOT (0x1480 + _mi_win_tls_offset) // User TLS slots
+#endif
+
+static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
+ #if (_M_X64 || _M_AMD64) && !defined(_M_ARM64EC)
+ return (void*)__readgsqword((unsigned long)slot); // direct load at offset from gs
+ #elif _M_IX86 && !defined(_M_ARM64EC)
+ return (void*)__readfsdword((unsigned long)slot); // direct load at offset from fs
+ #else
+ return ((void**)NtCurrentTeb())[slot / sizeof(void*)];
+ #endif
+}
+static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
+ ((void**)NtCurrentTeb())[slot / sizeof(void*)] = value;
+}
+
+#endif
+
+
+
+//-------------------------------------------------------------------
+// Get a fast unique thread id.
+//
+// Getting the thread id should be performant as it is called in the
+// fast path of `_mi_free` and we specialize for various platforms as
+// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
+// We only require _mi_prim_thread_id() to return a unique id
+// for each thread (unequal to zero).
+//-------------------------------------------------------------------
+
+
+// Do we have __builtin_thread_pointer? This would be the preferred way to get a unique thread id
+// but unfortunately, it seems we cannot test for this reliably at this time (see issue #883)
+// Nevertheless, it seems needed on older graviton platforms (see issue #851).
+// For now, we only enable this for specific platforms.
+#if !defined(__APPLE__) /* on apple (M1) the wrong register is read (tpidr_el0 instead of tpidrro_el0) so fall back to TLS slot assembly (<https://github.com/microsoft/mimalloc/issues/343#issuecomment-763272369>)*/ \
+ && !defined(__CYGWIN__) \
+ && !defined(MI_LIBC_MUSL) \
+ && (!defined(__clang_major__) || __clang_major__ >= 14) /* older clang versions emit bad code; fall back to using the TLS slot (<https://github.com/microsoft/mimalloc/issues/851>) */
+ #if (defined(__GNUC__) && (__GNUC__ >= 7) && defined(__aarch64__)) /* aarch64 for older gcc versions (issue #851) */ \
+ || (defined(__GNUC__) && (__GNUC__ >= 11) && defined(__x86_64__)) \
+ || (defined(__clang_major__) && (__clang_major__ >= 14) && (defined(__aarch64__) || defined(__x86_64__)))
+ #define MI_USE_BUILTIN_THREAD_POINTER 1
+ #endif
+#endif
+
+
+
+// defined in `init.c`; do not use these directly
+extern mi_decl_hidden mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
+extern mi_decl_hidden bool _mi_process_is_initialized; // has mi_process_init been called?
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
+
+// Get a unique id for the current thread.
+#if defined(MI_PRIM_THREAD_ID)
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ return MI_PRIM_THREAD_ID(); // used for example by CPython for a free threaded build (see python/cpython#115488)
+}
+
+#elif defined(_WIN32)
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ // Windows: works on Intel and ARM in both 32- and 64-bit
+ return (uintptr_t)NtCurrentTeb();
+}
+
+#elif MI_USE_BUILTIN_THREAD_POINTER
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ // Works on most Unix based platforms with recent compilers
+ return (uintptr_t)__builtin_thread_pointer();
+}
+
+#elif MI_HAS_TLS_SLOT
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ #if defined(__BIONIC__)
+ // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
+ // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
+ return (uintptr_t)mi_prim_tls_slot(1);
+ #else
+ // in all our other targets, slot 0 is the thread id
+ // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
+ // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
+ return (uintptr_t)mi_prim_tls_slot(0);
+ #endif
+}
+
+#else
+
+// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ return (uintptr_t)&_mi_heap_default;
+}
+
+#endif
+
+
+
+/* ----------------------------------------------------------------------------------------
+Get the thread local default heap: `_mi_prim_get_default_heap()`
+
+This is inlined here as it is on the fast path for allocation functions.
+
+On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
+__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
+that the storage will always be available (allocated on the thread stacks).
+
+On some platforms though we cannot use that when overriding `malloc` since the underlying
+TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
+We try to circumvent this in an efficient way:
+- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
+ loader itself calls `malloc` even before the modules are initialized.
+- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
+- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323)
+------------------------------------------------------------------------------------------- */
+
+static inline mi_heap_t* mi_prim_get_default_heap(void);
+
+#if defined(MI_MALLOC_OVERRIDE)
+#if defined(__APPLE__) // macOS
+ #define MI_TLS_SLOT 89 // seems unused?
+ // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
+ // see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
+#elif defined(__OpenBSD__)
+ // use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
+ // see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
+ #define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
+ // #elif defined(__DragonFly__)
+ // #warning "mimalloc is not working correctly on DragonFly yet."
+ // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?)
+#elif defined(__ANDROID__)
+ // See issue #381
+ #define MI_TLS_PTHREAD
+#endif
+#endif
+
+
+#if MI_TLS_SLOT
+# if !defined(MI_HAS_TLS_SLOT)
+# error "trying to use a TLS slot for the default heap, but the mi_prim_tls_slot primitives are not defined"
+# endif
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
+ #if MI_HAS_TLS_SLOT == 1 // check if the TLS slot is initialized
+ if mi_unlikely(heap == NULL) {
+ #ifdef __GNUC__
+ __asm(""); // prevent conditional load of the address of _mi_heap_empty
+ #endif
+ heap = (mi_heap_t*)&_mi_heap_empty;
+ }
+ #endif
+ return heap;
+}
+
+#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
+
+static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) {
+ pthread_t self = pthread_self();
+ #if defined(__DragonFly__)
+ if (self==NULL) return NULL;
+ #endif
+ return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
+}
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot();
+ if mi_unlikely(pheap == NULL) return _mi_heap_main_get();
+ mi_heap_t* heap = *pheap;
+ if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty;
+ return heap;
+}
+
+#elif defined(MI_TLS_PTHREAD)
+
+extern mi_decl_hidden pthread_key_t _mi_heap_default_key;
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
+ return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
+}
+
+#else // default using a thread local variable; used on most platforms.
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ #if defined(MI_TLS_RECURSE_GUARD)
+ if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
+ #endif
+ return _mi_heap_default;
+}
+
+#endif // mi_prim_get_default_heap()
+
+
+#endif // MIMALLOC_PRIM_H
diff --git a/compat/mimalloc/mimalloc/track.h b/compat/mimalloc/mimalloc/track.h
new file mode 100644
index 00000000000000..4b5709e2b54110
--- /dev/null
+++ b/compat/mimalloc/mimalloc/track.h
@@ -0,0 +1,145 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_TRACK_H
+#define MIMALLOC_TRACK_H
+
+/* ------------------------------------------------------------------------------------------------------
+Track memory ranges with macros for tools like Valgrind, address sanitizer, or other memory checkers.
+These can be defined for tracking allocation:
+
+ #define mi_track_malloc_size(p,reqsize,size,zero)
+ #define mi_track_free_size(p,_size)
+
+The macros are set up such that the size passed to `mi_track_free_size`
+always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`).
+The `reqsize` is what the user requested, and `size >= reqsize`.
+The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled,
+or otherwise it is the usable block size which may be larger than the original request.
+Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc).
+The `zero` parameter is `true` if the allocated block is zero initialized.
+
+Optional:
+
+ #define mi_track_align(p,alignedp,offset,size)
+ #define mi_track_resize(p,oldsize,newsize)
+ #define mi_track_init()
+
+The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block.
+The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
+The `mi_track_resize` is currently unused but could be called on reallocations within a block.
+`mi_track_init` is called at program start.
+
+The following macros are for tools like asan and valgrind to track whether memory is
+defined, undefined, or not accessible at all:
+
+ #define mi_track_mem_defined(p,size)
+ #define mi_track_mem_undefined(p,size)
+ #define mi_track_mem_noaccess(p,size)
+
+-------------------------------------------------------------------------------------------------------*/
+
+#if MI_TRACK_VALGRIND
+// valgrind tool
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy
+#define MI_TRACK_TOOL "valgrind"
+
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+
+#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
+#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size)
+#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size)
+#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size)
+
+#elif MI_TRACK_ASAN
+// address sanitizer
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_TOOL "asan"
+
+#include <sanitizer/asan_interface.h>
+
+#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size)
+#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size)
+
+#elif MI_TRACK_ETW
+// windows event tracing
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 1
+#define MI_TRACK_TOOL "ETW"
+
+#include "../src/prim/windows/etw.h"
+
+#define mi_track_init() EventRegistermicrosoft_windows_mimalloc();
+#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size)
+#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size)
+
+#else
+// no tracking
+
+#define MI_TRACK_ENABLED 0
+#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_TOOL "none"
+
+#define mi_track_malloc_size(p,reqsize,size,zero)
+#define mi_track_free_size(p,_size)
+
+#endif
+
+// -------------------
+// Utility definitions
+
+#ifndef mi_track_resize
+#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
+#endif
+
+#ifndef mi_track_align
+#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset)
+#endif
+
+#ifndef mi_track_init
+#define mi_track_init()
+#endif
+
+#ifndef mi_track_mem_defined
+#define mi_track_mem_defined(p,size)
+#endif
+
+#ifndef mi_track_mem_undefined
+#define mi_track_mem_undefined(p,size)
+#endif
+
+#ifndef mi_track_mem_noaccess
+#define mi_track_mem_noaccess(p,size)
+#endif
+
+
+#if MI_PADDING
+#define mi_track_malloc(p,reqsize,zero) \
+ if ((p)!=NULL) { \
+ mi_assert_internal(mi_usable_size(p)==(reqsize)); \
+ mi_track_malloc_size(p,reqsize,reqsize,zero); \
+ }
+#else
+#define mi_track_malloc(p,reqsize,zero) \
+ if ((p)!=NULL) { \
+ mi_assert_internal(mi_usable_size(p)>=(reqsize)); \
+ mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
+ }
+#endif
+
+#endif
diff --git a/compat/mimalloc/mimalloc/types.h b/compat/mimalloc/mimalloc/types.h
new file mode 100644
index 00000000000000..e778e8788908fc
--- /dev/null
+++ b/compat/mimalloc/mimalloc/types.h
@@ -0,0 +1,687 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_TYPES_H
+#define MIMALLOC_TYPES_H
+
+// --------------------------------------------------------------------------
+// This file contains the main type definitions for mimalloc:
+// mi_heap_t : all data for a thread-local heap, contains
+// lists of all managed heap pages.
+// mi_segment_t : a larger chunk of memory (32GiB) from where pages
+// are allocated. A segment is divided in slices (64KiB) from
+// which pages are allocated.
+// mi_page_t : a "mimalloc" page (usually 64KiB or 512KiB) from
+// where objects are allocated.
+// Note: we write "OS page" for OS memory pages while
+// using plain "page" for mimalloc pages (`mi_page_t`).
+// --------------------------------------------------------------------------
+
+
+#include <mimalloc-stats.h>
+#include <stddef.h> // ptrdiff_t
+#include <stdint.h> // uintptr_t, uint16_t, etc
+#include <stdbool.h> // bool
+#include "atomic.h" // _Atomic
+
+#ifdef _MSC_VER
+#pragma warning(disable:4214) // bitfield is not int
+#endif
+
+// Minimal alignment necessary. On most platforms 16 bytes are needed
+// due to SSE registers for example. This must be at least `sizeof(void*)`
+#ifndef MI_MAX_ALIGN_SIZE
+#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
+#endif
+
+// ------------------------------------------------------
+// Variants
+// ------------------------------------------------------
+
+// Define NDEBUG in the release version to disable assertions.
+// #define NDEBUG
+
+// Define MI_TRACK_ to enable tracking support
+// #define MI_TRACK_VALGRIND 1
+// #define MI_TRACK_ASAN 1
+// #define MI_TRACK_ETW 1
+
+// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
+// #define MI_STAT 1
+
+// Define MI_SECURE to enable security mitigations
+// #define MI_SECURE 1 // guard page around metadata
+// #define MI_SECURE 2 // guard page around each mimalloc page
+// #define MI_SECURE 3 // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
+// #define MI_SECURE 4 // checks for double free. (may be more expensive)
+
+#if !defined(MI_SECURE)
+#define MI_SECURE 0
+#endif
+
+// Define MI_DEBUG for debug mode
+// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
+// #define MI_DEBUG 2 // + internal assertion checks
+// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
+#if !defined(MI_DEBUG)
+#if defined(MI_BUILD_RELEASE) || defined(NDEBUG)
+#define MI_DEBUG 0
+#else
+#define MI_DEBUG 2
+#endif
+#endif
+
+// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
+// Padding should be disabled when using guard pages
+// #define MI_GUARDED 1
+#if defined(MI_GUARDED)
+#define MI_PADDING 0
+#endif
+
+// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
+// The padding can detect buffer overflow on free.
+#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
+#define MI_PADDING 1
+#endif
+
+// Check padding bytes; allows byte-precise buffer overflow detection
+#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_PADDING_CHECK 1
+#endif
+
+
+// Encoded free lists allow detection of corrupted free lists
+// and can detect buffer overflows, modify after free, and double `free`s.
+#if (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_ENCODE_FREELIST 1
+#endif
+
+
+// We used to abandon huge pages in order to eagerly deallocate it if freed from another thread.
+// Unfortunately, that makes it not possible to visit them during a heap walk or include them in a
+// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks nowadays if freed from
+// another thread so the memory becomes "virtually" available (and eventually gets properly freed by
+// the owning thread).
+// #define MI_HUGE_PAGE_ABANDON 1
+
+
+// ------------------------------------------------------
+// Platform specific values
+// ------------------------------------------------------
+
+// ------------------------------------------------------
+// Size of a pointer.
+// We assume that `sizeof(void*)==sizeof(intptr_t)`
+// and it holds for all platforms we know of.
+//
+// However, the C standard only requires that:
+// p == (void*)((intptr_t)p))
+// but we also need:
+// i == (intptr_t)((void*)i)
+// or otherwise one might define an intptr_t type that is larger than a pointer...
+// ------------------------------------------------------
+
+#if INTPTR_MAX > INT64_MAX
+# define MI_INTPTR_SHIFT (4) // assume 128-bit (as on arm CHERI for example)
+#elif INTPTR_MAX == INT64_MAX
+# define MI_INTPTR_SHIFT (3)
+#elif INTPTR_MAX == INT32_MAX
+# define MI_INTPTR_SHIFT (2)
+#else
+#error platform pointers must be 32, 64, or 128 bits
+#endif
+
+#if SIZE_MAX == UINT64_MAX
+# define MI_SIZE_SHIFT (3)
+typedef int64_t mi_ssize_t;
+#elif SIZE_MAX == UINT32_MAX
+# define MI_SIZE_SHIFT (2)
+typedef int32_t mi_ssize_t;
+#else
+#error platform objects must be 32 or 64 bits
+#endif
+
+#if (SIZE_MAX/2) > LONG_MAX
+# define MI_ZU(x) x##ULL
+# define MI_ZI(x) x##LL
+#else
+# define MI_ZU(x) x##UL
+# define MI_ZI(x) x##L
+#endif
+
+#define MI_INTPTR_SIZE (1<<MI_INTPTR_SHIFT)
+#define MI_INTPTR_BITS (MI_INTPTR_SIZE*8)
+
+#define MI_SIZE_SIZE (1<<MI_SIZE_SHIFT)
+#define MI_SIZE_BITS (MI_SIZE_SIZE*8)
+
+#define MI_KiB (MI_ZU(1024))
+#define MI_MiB (MI_KiB*MI_KiB)
+#define MI_GiB (MI_MiB*MI_KiB)
+
+
+// ------------------------------------------------------
+// Main internal data-structures
+// ------------------------------------------------------
+
+// Main tuning parameters for segment and page sizes
+// Sizes for 64-bit (usually divide by two for 32-bit)
+#ifndef MI_SEGMENT_SLICE_SHIFT
+#define MI_SEGMENT_SLICE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB (32KiB on 32-bit)
+#endif
+
+#ifndef MI_SEGMENT_SHIFT
+#if MI_INTPTR_SIZE > 4
+#define MI_SEGMENT_SHIFT ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB
+#else
+#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit
+#endif
+#endif
+
+#ifndef MI_SMALL_PAGE_SHIFT
+#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB
+#endif
+#ifndef MI_MEDIUM_PAGE_SHIFT
+#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB
+#endif
+
+// Derived constants
+#define MI_SEGMENT_SIZE (MI_ZU(1)<<MI_SEGMENT_SHIFT)
+#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
+#define MI_SEGMENT_MASK ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
+#define MI_SEGMENT_SLICE_SIZE (MI_ZU(1)<<MI_SEGMENT_SLICE_SHIFT)
+#define MI_SLICES_PER_SEGMENT (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024
+
+#define MI_SMALL_PAGE_SIZE (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
+#define MI_MEDIUM_PAGE_SIZE (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
+
+#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) // 8KiB on 64-bit
+#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128KiB on 64-bit
+#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+#define MI_LARGE_OBJ_SIZE_MAX (MI_SEGMENT_SIZE/2) // 32MiB on 64-bit
+#define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+
+// Maximum number of size classes. (spaced exponentially in 12.5% increments)
+#define MI_BIN_HUGE (73U)
+
+#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
+#error "mimalloc internal: define more bins"
+#endif
+
+// Maximum block size for which blocks are guaranteed to be block size aligned. (see `segment.c:_mi_segment_page_start`)
+#define MI_MAX_ALIGN_GUARANTEE (MI_MEDIUM_OBJ_SIZE_MAX)
+
+// Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
+#define MI_BLOCK_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
+
+// Maximum slice count (255) for which we can find the page for interior pointers
+#define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
+
+// we never allocate more than PTRDIFF_MAX (see also )
+// on 64-bit+ systems we also limit the maximum allocation size such that the slice count fits in 32-bits. (issue #877)
+#if (PTRDIFF_MAX > INT32_MAX) && (PTRDIFF_MAX >= (MI_SEGMENT_SLICE_SIZE * UINT32_MAX))
+#define MI_MAX_ALLOC_SIZE (MI_SEGMENT_SLICE_SIZE * (UINT32_MAX-1))
+#else
+#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX
+#endif
+
+
+// ------------------------------------------------------
+// Mimalloc pages contain allocated blocks
+// ------------------------------------------------------
+
+// The free lists use encoded next fields
+// (Only actually encodes when MI_ENCODE_FREELIST is defined.)
+typedef uintptr_t mi_encoded_t;
+
+// thread id's
+typedef size_t mi_threadid_t;
+
+// free lists contain blocks
+typedef struct mi_block_s {
+ mi_encoded_t next;
+} mi_block_t;
+
+#if MI_GUARDED
+// we always align guarded pointers in a block at an offset
+// the block `next` field is then used as a tag to distinguish regular offset aligned blocks from guarded ones
+#define MI_BLOCK_TAG_ALIGNED ((mi_encoded_t)(0))
+#define MI_BLOCK_TAG_GUARDED (~MI_BLOCK_TAG_ALIGNED)
+#endif
+
+
+// The delayed flags are used for efficient multi-threaded free-ing
+typedef enum mi_delayed_e {
+ MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list
+ MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap
+ MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
+ MI_NEVER_DELAYED_FREE = 3 // sticky: used for abandoned pages without an owning heap; this only resets on page reclaim
+} mi_delayed_t;
+
+
+// The `in_full` and `has_aligned` page flags are put in a union to efficiently
+// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
+#if !MI_TSAN
+typedef union mi_page_flags_s {
+ uint8_t full_aligned;
+ struct {
+ uint8_t in_full : 1;
+ uint8_t has_aligned : 1;
+ } x;
+} mi_page_flags_t;
+#else
+// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
+typedef union mi_page_flags_s {
+ uint32_t full_aligned;
+ struct {
+ uint8_t in_full;
+ uint8_t has_aligned;
+ } x;
+} mi_page_flags_t;
+#endif
+
+// Thread free list.
+// We use the bottom 2 bits of the pointer for mi_delayed_t flags
+typedef uintptr_t mi_thread_free_t;
+
+// A page contains blocks of one specific size (`block_size`).
+// Each page has three list of free blocks:
+// `free` for blocks that can be allocated,
+// `local_free` for freed blocks that are not yet available to `mi_malloc`
+// `thread_free` for freed blocks by other threads
+// The `local_free` and `thread_free` lists are migrated to the `free` list
+// when it is exhausted. The separate `local_free` list is necessary to
+// implement a monotonic heartbeat. The `thread_free` list is needed for
+// avoiding atomic operations in the common case.
+//
+// `used - |thread_free|` == actual blocks that are in use (alive)
+// `used - |thread_free| + |free| + |local_free| == capacity`
+//
+// We don't count `freed` (as |free|) but use `used` to reduce
+// the number of memory accesses in the `mi_page_all_free` function(s).
+//
+// Notes:
+// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
+// - Using `uint16_t` does not seem to slow things down
+// - The size is 12 words on 64-bit which helps the page index calculations
+// (and 14 words on 32-bit, and encoded free lists add 2 words)
+// - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
+// concurrent frees where only the first concurrent free adds to the owning
+// heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
+// The invariant is that no-delayed-free is only set if there is
+// at least one block that will be added, or as already been added, to
+// the owning heap `thread_delayed_free` list. This guarantees that pages
+// will be freed correctly even if only other threads free blocks.
+typedef struct mi_page_s {
+ // "owned" by the segment
+ uint32_t slice_count; // slices in this page (0 if not a page)
+ uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
+ uint8_t is_committed:1; // `true` if the page virtual memory is committed
+ uint8_t is_zero_init:1; // `true` if the page was initially zero initialized
+ uint8_t is_huge:1; // `true` if the page is in a huge segment (`segment->kind == MI_SEGMENT_HUGE`)
+ // padding
+ // layout like this to optimize access in `mi_malloc` and `mi_free`
+ uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
+ uint16_t reserved; // number of blocks reserved in memory
+ mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
+ uint8_t free_is_zero:1; // `true` if the blocks in the free list are zero initialized
+ uint8_t retire_expire:7; // expiration count for retired blocks
+
+ mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
+ mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
+ uint16_t used; // number of blocks in use (including blocks in `thread_free`)
+ uint8_t block_size_shift; // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
+ uint8_t heap_tag; // tag of the owning heap, used to separate heaps by object type
+ // padding
+ size_t block_size; // size available in each block (always `>0`)
+ uint8_t* page_start; // start of the page area containing the blocks
+
+ #if (MI_ENCODE_FREELIST || MI_PADDING)
+ uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
+ #endif
+
+ _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads
+ _Atomic(uintptr_t) xheap;
+
+ struct mi_page_s* next; // next page owned by this thread with the same `block_size`
+ struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
+
+ // 64-bit 11 words, 32-bit 13 words, (+2 for secure)
+ void* padding[1];
+} mi_page_t;
+
+
+
+// ------------------------------------------------------
+// Mimalloc segments contain mimalloc pages
+// ------------------------------------------------------
+
+typedef enum mi_page_kind_e {
+ MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
+ MI_PAGE_MEDIUM, // medium blocks go into 512KiB pages inside a segment
+ MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment
+ MI_PAGE_HUGE // a huge page is a single page in a segment of variable size
+ // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
+} mi_page_kind_t;
+
+typedef enum mi_segment_kind_e {
+ MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
+ MI_SEGMENT_HUGE, // segment with just one huge page inside.
+} mi_segment_kind_t;
+
+// ------------------------------------------------------
+// A segment holds a commit mask where a bit is set if
+// the corresponding MI_COMMIT_SIZE area is committed.
+// The MI_COMMIT_SIZE must be a multiple of the slice
+// size. If it is equal we have the most fine grained
+// decommit (but setting it higher can be more efficient).
+// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
+// be committed in one go which can be set higher than
+// MI_COMMIT_SIZE for efficiency (while the decommit mask
+// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
+// ------------------------------------------------------
+
+#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE)
+#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
+#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
+#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
+
+#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
+#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
+#endif
+
+typedef struct mi_commit_mask_s {
+ size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
+} mi_commit_mask_t;
+
+typedef mi_page_t mi_slice_t;
+typedef int64_t mi_msecs_t;
+
+
+// ---------------------------------------------------------------
+// a memory id tracks the provenance of arena/OS allocated memory
+// ---------------------------------------------------------------
+
+// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
+typedef enum mi_memkind_e {
+ MI_MEM_NONE, // not allocated
+ MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
+ MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example)
+ MI_MEM_OS, // allocated from the OS
+ MI_MEM_OS_HUGE, // allocated as huge OS pages (usually 1GiB, pinned to physical memory)
+ MI_MEM_OS_REMAP, // allocated in a remapable area (i.e. using `mremap`)
+ MI_MEM_ARENA // allocated from an arena (the usual case)
+} mi_memkind_t;
+
+static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
+ return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
+}
+
+typedef struct mi_memid_os_info {
+ void* base; // actual base address of the block (used for offset aligned allocations)
+ size_t size; // full allocation size
+} mi_memid_os_info_t;
+
+typedef struct mi_memid_arena_info {
+ size_t block_index; // index in the arena
+ mi_arena_id_t id; // arena id (>= 1)
+ bool is_exclusive; // this arena can only be used for specific arena allocations
+} mi_memid_arena_info_t;
+
+typedef struct mi_memid_s {
+ union {
+ mi_memid_os_info_t os; // only used for MI_MEM_OS
+ mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
+ } mem;
+ bool is_pinned; // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large (2Mib) or huge (1GiB) OS pages)
+ bool initially_committed;// `true` if the memory was originally allocated as committed
+ bool initially_zero; // `true` if the memory was originally zero initialized
+ mi_memkind_t memkind;
+} mi_memid_t;
+
+
+// -----------------------------------------------------------------------------------------
+// Segments are large allocated memory blocks (32mb on 64 bit) from arenas or the OS.
+//
+// Inside segments we allocate fixed size mimalloc pages (`mi_page_t`) that contain blocks.
+// The start of a segment is this structure with a fixed number of slice entries (`slices`)
+// usually followed by a guard OS page and the actual allocation area with pages.
+// While a page is not allocated, we view its data as a `mi_slice_t` (instead of a `mi_page_t`).
+// Of any free area, the first slice has the info and `slice_offset == 0`; for any subsequent
+// slices part of the area, the `slice_offset` is the byte offset back to the first slice
+// (so we can quickly find the page info on a free, `internal.h:_mi_segment_page_of`).
+// For slices, the `block_size` field is repurposed to signify if a slice is used (`1`) or not (`0`).
+// Small and medium pages use a fixed amount of slices to reduce slice fragmentation, while
+// large and huge pages span a variable amount of slices.
+
+typedef struct mi_subproc_s mi_subproc_t;
+
+typedef struct mi_segment_s {
+ // constant fields
+ mi_memid_t memid; // memory id for arena/OS allocation
+ bool allow_decommit; // can we decommit the memory
+ bool allow_purge; // can we purge the memory (reset or decommit)
+ size_t segment_size;
+ mi_subproc_t* subproc; // segment belongs to sub process
+
+ // segment fields
+ mi_msecs_t purge_expire; // purge slices in the `purge_mask` after this time
+ mi_commit_mask_t purge_mask; // slices that can be purged
+ mi_commit_mask_t commit_mask; // slices that are currently committed
+
+ // from here is zero initialized
+ struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
+ bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation)
+ bool dont_free; // can be temporarily true to ensure the segment is not freed
+ bool free_is_zero; // if free spans are zero
+
+ size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
+ size_t abandoned_visits; // count how often this segment is visited during abandoned reclamation (to force reclaim if it takes too long)
+ size_t used; // count of pages in use
+ uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
+
+ struct mi_segment_s* abandoned_os_next; // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
+ struct mi_segment_s* abandoned_os_prev;
+
+ size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
+ size_t segment_info_slices; // initial count of slices that we are using for segment info and possible guard pages.
+
+ // layout like this to optimize access in `mi_free`
+ mi_segment_kind_t kind;
+ size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
+ _Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
+
+ mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one extra final entry for huge blocks with large alignment
+} mi_segment_t;
+
+
+// ------------------------------------------------------
+// Heaps
+// Provide first-class heaps to allocate from.
+// A heap just owns a set of pages for allocation and
+// can only be allocated/reallocated from the thread that created it.
+// Freeing blocks can be done from any thread though.
+// Per thread, the segments are shared among its heaps.
+// Per thread, there is always a default heap that is
+// used for allocation; it is initialized to statically
+// point to an empty heap to avoid initialization checks
+// in the fast path.
+// ------------------------------------------------------
+
+// Thread local data
+typedef struct mi_tld_s mi_tld_t;
+
+// Pages of a certain block size are held in a queue.
+typedef struct mi_page_queue_s {
+ mi_page_t* first;
+ mi_page_t* last;
+ size_t block_size;
+} mi_page_queue_t;
+
+#define MI_BIN_FULL (MI_BIN_HUGE+1)
+
+// Random context
+typedef struct mi_random_cxt_s {
+ uint32_t input[16];
+ uint32_t output[16];
+ int output_available;
+ bool weak;
+} mi_random_ctx_t;
+
+
+// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
+#if (MI_PADDING)
+typedef struct mi_padding_s {
+ uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
+ uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
+} mi_padding_t;
+#define MI_PADDING_SIZE (sizeof(mi_padding_t))
+#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
+#else
+#define MI_PADDING_SIZE 0
+#define MI_PADDING_WSIZE 0
+#endif
+
+#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
+
+
+// A heap owns a set of pages.
+struct mi_heap_s {
+ mi_tld_t* tld;
+ _Atomic(mi_block_t*) thread_delayed_free;
+ mi_threadid_t thread_id; // thread this heap belongs to
+ mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
+ uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
+ uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
+ mi_random_ctx_t random; // random number context used for secure allocation
+ size_t page_count; // total number of pages in the `pages` queues.
+ size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
+ size_t page_retired_max; // largest retired index into the `pages` array.
+ long generic_count; // how often is `_mi_malloc_generic` called?
+ long generic_collect_count; // how often is `_mi_malloc_generic` called without collecting?
+ mi_heap_t* next; // list of heaps per thread
+ bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
+ uint8_t tag; // custom tag, can be used for separating heaps based on the object types
+ #if MI_GUARDED
+ size_t guarded_size_min; // minimal size for guarded objects
+ size_t guarded_size_max; // maximal size for guarded objects
+ size_t guarded_sample_rate; // sample rate (set to 0 to disable guarded pages)
+ size_t guarded_sample_count; // current sample count (counting down to 0)
+ #endif
+ mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
+ mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
+};
+
+
+// ------------------------------------------------------
+// Sub processes do not reclaim or visit segments
+// from other sub processes. These are essentially the
+// static variables of a process.
+// ------------------------------------------------------
+
+struct mi_subproc_s {
+ _Atomic(size_t) abandoned_count; // count of abandoned segments for this sub-process
+ _Atomic(size_t) abandoned_os_list_count; // count of abandoned segments in the os-list
+ mi_lock_t abandoned_os_lock; // lock for the abandoned os segment list (outside of arena's) (this lock protect list operations)
+ mi_lock_t abandoned_os_visit_lock; // ensure only one thread per subproc visits the abandoned os list
+ mi_segment_t* abandoned_os_list; // doubly-linked list of abandoned segments outside of arena's (in OS allocated memory)
+ mi_segment_t* abandoned_os_list_tail; // the tail-end of the list
+ mi_memid_t memid; // provenance of this memory block
+};
+
+
+// ------------------------------------------------------
+// Thread Local data
+// ------------------------------------------------------
+
+// A "span" is an available range of slices. The span queues keep
+// track of slice spans of at most the given `slice_count` (but more than the previous size class).
+typedef struct mi_span_queue_s {
+ mi_slice_t* first;
+ mi_slice_t* last;
+ size_t slice_count;
+} mi_span_queue_t;
+
+#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
+
+// Segments thread local data
+typedef struct mi_segments_tld_s {
+ mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments
+ size_t count; // current number of segments;
+ size_t peak_count; // peak number of segments
+ size_t current_size; // current size of all segments
+ size_t peak_size; // peak size of all segments
+ size_t reclaim_count;// number of reclaimed (abandoned) segments
+ mi_subproc_t* subproc; // sub-process this thread belongs to.
+ mi_stats_t* stats; // points to tld stats
+} mi_segments_tld_t;
+
+// Thread local data
+struct mi_tld_s {
+ unsigned long long heartbeat; // monotonic heartbeat count
+ bool recurse; // true if deferred was called; used to prevent infinite recursion.
+ mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
+ mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
+ mi_segments_tld_t segments; // segment tld
+ mi_stats_t stats; // statistics
+};
+
+
+// ------------------------------------------------------
+// Debug
+// ------------------------------------------------------
+
+#if !defined(MI_DEBUG_UNINIT)
+#define MI_DEBUG_UNINIT (0xD0)
+#endif
+#if !defined(MI_DEBUG_FREED)
+#define MI_DEBUG_FREED (0xDF)
+#endif
+#if !defined(MI_DEBUG_PADDING)
+#define MI_DEBUG_PADDING (0xDE)
+#endif
+
+
+// ------------------------------------------------------
+// Statistics
+// ------------------------------------------------------
+#ifndef MI_STAT
+#if (MI_DEBUG>0)
+#define MI_STAT 2
+#else
+#define MI_STAT 0
+#endif
+#endif
+
+// add to stat keeping track of the peak
+void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
+void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
+void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount);
+// counters can just be increased
+void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
+
+#if (MI_STAT)
+#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
+#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
+#define mi_stat_adjust_decrease(stat,amount) _mi_stat_adjust_decrease( &(stat), amount)
+#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
+#else
+#define mi_stat_increase(stat,amount) ((void)0)
+#define mi_stat_decrease(stat,amount) ((void)0)
+#define mi_stat_adjust_decrease(stat,amount) ((void)0)
+#define mi_stat_counter_increase(stat,amount) ((void)0)
+#endif
+
+#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
+#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
+#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
+#define mi_heap_stat_adjust_decrease(heap,stat,amount) mi_stat_adjust_decrease( (heap)->tld->stats.stat, amount)
+
+#endif
diff --git a/compat/mimalloc/options.c b/compat/mimalloc/options.c
new file mode 100644
index 00000000000000..b07f029e65dd29
--- /dev/null
+++ b/compat/mimalloc/options.c
@@ -0,0 +1,670 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // mi_prim_out_stderr
+
+#include <stdio.h>   // stdin/stdout
+#include <stdlib.h>  // abort
+
+
+
+static long mi_max_error_count = 16; // stop outputting errors after this (use < 0 for no limit)
+static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit)
+
+static void mi_add_stderr_output(void);
+
+int mi_version(void) mi_attr_noexcept {
+ return MI_MALLOC_VERSION;
+}
+
+
+// --------------------------------------------------------
+// Options
+// These can be accessed by multiple threads and may be
+// concurrently initialized, but an initializing data race
+// is ok since they resolve to the same value.
+// --------------------------------------------------------
+typedef enum mi_init_e {
+ UNINIT, // not yet initialized
+ DEFAULTED, // not found in the environment, use default value
+ INITIALIZED // found in environment or set explicitly
+} mi_init_t;
+
+typedef struct mi_option_desc_s {
+ long value; // the value
+ mi_init_t init; // is it initialized yet? (from the environment)
+ mi_option_t option; // for debugging: the option index should match the option
+ const char* name; // option name without `mimalloc_` prefix
+ const char* legacy_name; // potential legacy option name
+} mi_option_desc_t;
+
+#define MI_OPTION(opt) mi_option_##opt, #opt, NULL
+#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy
+
+// Some options can be set at build time for statically linked libraries
+// (use `-DMI_EXTRA_CPPDEFS="opt1=val1;opt2=val2"`)
+//
+// This is useful if we cannot pass them as environment variables
+// (and setting them programmatically would be too late)
+
+#ifndef MI_DEFAULT_VERBOSE
+#define MI_DEFAULT_VERBOSE 0
+#endif
+
+#ifndef MI_DEFAULT_EAGER_COMMIT
+#define MI_DEFAULT_EAGER_COMMIT 1
+#endif
+
+#ifndef MI_DEFAULT_ARENA_EAGER_COMMIT
+#define MI_DEFAULT_ARENA_EAGER_COMMIT 2
+#endif
+
+// in KiB
+#ifndef MI_DEFAULT_ARENA_RESERVE
+ #if (MI_INTPTR_SIZE>4)
+ #define MI_DEFAULT_ARENA_RESERVE 1024L*1024L
+ #else
+ #define MI_DEFAULT_ARENA_RESERVE 128L*1024L
+ #endif
+#endif
+
+#ifndef MI_DEFAULT_DISALLOW_ARENA_ALLOC
+#define MI_DEFAULT_DISALLOW_ARENA_ALLOC 0
+#endif
+
+#ifndef MI_DEFAULT_ALLOW_LARGE_OS_PAGES
+#define MI_DEFAULT_ALLOW_LARGE_OS_PAGES 0
+#endif
+
+#ifndef MI_DEFAULT_RESERVE_HUGE_OS_PAGES
+#define MI_DEFAULT_RESERVE_HUGE_OS_PAGES 0
+#endif
+
+#ifndef MI_DEFAULT_RESERVE_OS_MEMORY
+#define MI_DEFAULT_RESERVE_OS_MEMORY 0
+#endif
+
+#ifndef MI_DEFAULT_GUARDED_SAMPLE_RATE
+#if MI_GUARDED
+#define MI_DEFAULT_GUARDED_SAMPLE_RATE 4000
+#else
+#define MI_DEFAULT_GUARDED_SAMPLE_RATE 0
+#endif
+#endif
+
+
+#ifndef MI_DEFAULT_ALLOW_THP
+#if defined(__ANDROID__)
+#define MI_DEFAULT_ALLOW_THP 0
+#else
+#define MI_DEFAULT_ALLOW_THP 1
+#endif
+#endif
+
+// Static options
+static mi_option_desc_t options[_mi_option_last] =
+{
+ // stable options
+ #if MI_DEBUG || defined(MI_SHOW_ERRORS)
+ { 1, UNINIT, MI_OPTION(show_errors) },
+ #else
+ { 0, UNINIT, MI_OPTION(show_errors) },
+ #endif
+ { 0, UNINIT, MI_OPTION(show_stats) },
+ { MI_DEFAULT_VERBOSE, UNINIT, MI_OPTION(verbose) },
+
+ // some of the following options are experimental and not all combinations are allowed.
+ { MI_DEFAULT_EAGER_COMMIT,
+ UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
+ { MI_DEFAULT_ARENA_EAGER_COMMIT,
+ UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. linux)
+ { 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit)
+ { MI_DEFAULT_ALLOW_LARGE_OS_PAGES,
+ UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
+ { MI_DEFAULT_RESERVE_HUGE_OS_PAGES,
+ UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
+ {-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
+ { MI_DEFAULT_RESERVE_OS_MEMORY,
+ UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`)
+ { 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
+ { 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free
+ { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) }, // reset free page memory when a thread terminates
+ { 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit)
+#if defined(__NetBSD__)
+ { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
+#else
+ { 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
+#endif
+ { 10, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds
+ { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
+ { 0, UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
+ { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
+ { 32, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
+ { 32, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
+ { 10, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try.
+ { 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
+ { MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
+ { 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's
+ { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
+ { 0, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
+ { MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's)
+ { 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
+#if defined(MI_VISIT_ABANDONED)
+ { 1, INITIALIZED, MI_OPTION(visit_abandoned) }, // allow visiting heap blocks in abandoned segments; requires taking locks during reclaim.
+#else
+ { 0, UNINIT, MI_OPTION(visit_abandoned) },
+#endif
+ { 0, UNINIT, MI_OPTION(guarded_min) }, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects
+ { MI_GiB, UNINIT, MI_OPTION(guarded_max) }, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects
+ { 0, UNINIT, MI_OPTION(guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+ { MI_DEFAULT_GUARDED_SAMPLE_RATE,
+ UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
+ { 0, UNINIT, MI_OPTION(guarded_sample_seed)},
+ { 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable.
+ { 10000, UNINIT, MI_OPTION(generic_collect) }, // collect heaps every N (=10000) generic allocation calls
+ { MI_DEFAULT_ALLOW_THP,
+ UNINIT, MI_OPTION(allow_thp) } // allow transparent huge pages?
+};
+
+static void mi_option_init(mi_option_desc_t* desc);
+
+static bool mi_option_has_size_in_kib(mi_option_t option) {
+ return (option == mi_option_reserve_os_memory || option == mi_option_arena_reserve);
+}
+
+void _mi_options_init(void) {
+ // called on process load
+ mi_add_stderr_output(); // now it safe to use stderr for output
+ for(int i = 0; i < _mi_option_last; i++ ) {
+ mi_option_t option = (mi_option_t)i;
+ long l = mi_option_get(option); MI_UNUSED(l); // initialize
+ }
+ mi_max_error_count = mi_option_get(mi_option_max_errors);
+ mi_max_warning_count = mi_option_get(mi_option_max_warnings);
+ #if MI_GUARDED
+ if (mi_option_get(mi_option_guarded_sample_rate) > 0) {
+ if (mi_option_is_enabled(mi_option_allow_large_os_pages)) {
+ mi_option_disable(mi_option_allow_large_os_pages);
+ _mi_warning_message("option 'allow_large_os_pages' is disabled to allow for guarded objects\n");
+ }
+ }
+ #endif
+ if (mi_option_is_enabled(mi_option_verbose)) { mi_options_print(); }
+}
+
+#define mi_stringifyx(str) #str // and stringify
+#define mi_stringify(str) mi_stringifyx(str) // expand
+
+void mi_options_print(void) mi_attr_noexcept
+{
+ // show version
+ const int vermajor = MI_MALLOC_VERSION/100;
+ const int verminor = (MI_MALLOC_VERSION%100)/10;
+ const int verpatch = (MI_MALLOC_VERSION%10);
+ _mi_message("v%i.%i.%i%s%s (built on %s, %s)\n", vermajor, verminor, verpatch,
+ #if defined(MI_CMAKE_BUILD_TYPE)
+ ", " mi_stringify(MI_CMAKE_BUILD_TYPE)
+ #else
+ ""
+ #endif
+ ,
+ #if defined(MI_GIT_DESCRIBE)
+ ", git " mi_stringify(MI_GIT_DESCRIBE)
+ #else
+ ""
+ #endif
+ , __DATE__, __TIME__);
+
+ // show options
+ for (int i = 0; i < _mi_option_last; i++) {
+ mi_option_t option = (mi_option_t)i;
+ long l = mi_option_get(option); MI_UNUSED(l); // possibly initialize
+ mi_option_desc_t* desc = &options[option];
+ _mi_message("option '%s': %ld %s\n", desc->name, desc->value, (mi_option_has_size_in_kib(option) ? "KiB" : ""));
+ }
+
+ // show build configuration
+ _mi_message("debug level : %d\n", MI_DEBUG );
+ _mi_message("secure level: %d\n", MI_SECURE );
+ _mi_message("mem tracking: %s\n", MI_TRACK_TOOL);
+ #if MI_GUARDED
+ _mi_message("guarded build: %s\n", mi_option_get(mi_option_guarded_sample_rate) != 0 ? "enabled" : "disabled");
+ #endif
+ #if MI_TSAN
+ _mi_message("thread santizer enabled\n");
+ #endif
+}
+
+long _mi_option_get_fast(mi_option_t option) {
+ mi_assert(option >= 0 && option < _mi_option_last);
+ mi_option_desc_t* desc = &options[option];
+ mi_assert(desc->option == option); // index should match the option
+ //mi_assert(desc->init != UNINIT);
+ return desc->value;
+}
+
+
+mi_decl_nodiscard long mi_option_get(mi_option_t option) {
+ mi_assert(option >= 0 && option < _mi_option_last);
+ if (option < 0 || option >= _mi_option_last) return 0;
+ mi_option_desc_t* desc = &options[option];
+ mi_assert(desc->option == option); // index should match the option
+ if mi_unlikely(desc->init == UNINIT) {
+ mi_option_init(desc);
+ }
+ return desc->value;
+}
+
+mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long max) {
+ long x = mi_option_get(option);
+ return (x < min ? min : (x > max ? max : x));
+}
+
+mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) {
+ const long x = mi_option_get(option);
+ size_t size = (x < 0 ? 0 : (size_t)x);
+ if (mi_option_has_size_in_kib(option)) {
+ size *= MI_KiB;
+ }
+ return size;
+}
+
+void mi_option_set(mi_option_t option, long value) {
+ mi_assert(option >= 0 && option < _mi_option_last);
+ if (option < 0 || option >= _mi_option_last) return;
+ mi_option_desc_t* desc = &options[option];
+ mi_assert(desc->option == option); // index should match the option
+ desc->value = value;
+ desc->init = INITIALIZED;
+ // ensure min/max range; be careful to not recurse.
+ if (desc->option == mi_option_guarded_min && _mi_option_get_fast(mi_option_guarded_max) < value) {
+ mi_option_set(mi_option_guarded_max, value);
+ }
+ else if (desc->option == mi_option_guarded_max && _mi_option_get_fast(mi_option_guarded_min) > value) {
+ mi_option_set(mi_option_guarded_min, value);
+ }
+}
+
+void mi_option_set_default(mi_option_t option, long value) {
+ mi_assert(option >= 0 && option < _mi_option_last);
+ if (option < 0 || option >= _mi_option_last) return;
+ mi_option_desc_t* desc = &options[option];
+ if (desc->init != INITIALIZED) {
+ desc->value = value;
+ }
+}
+
+mi_decl_nodiscard bool mi_option_is_enabled(mi_option_t option) {
+ return (mi_option_get(option) != 0);
+}
+
+void mi_option_set_enabled(mi_option_t option, bool enable) {
+ mi_option_set(option, (enable ? 1 : 0));
+}
+
+void mi_option_set_enabled_default(mi_option_t option, bool enable) {
+ mi_option_set_default(option, (enable ? 1 : 0));
+}
+
+void mi_option_enable(mi_option_t option) {
+ mi_option_set_enabled(option,true);
+}
+
+void mi_option_disable(mi_option_t option) {
+ mi_option_set_enabled(option,false);
+}
+
+static void mi_cdecl mi_out_stderr(const char* msg, void* arg) {
+ MI_UNUSED(arg);
+ if (msg != NULL && msg[0] != 0) {
+ _mi_prim_out_stderr(msg);
+ }
+}
+
+// Since an output function can be registered earliest in the `main`
+// function we also buffer output that happens earlier. When
+// an output function is registered it is called immediately with
+// the output up to that point.
+#ifndef MI_MAX_DELAY_OUTPUT
+#define MI_MAX_DELAY_OUTPUT ((size_t)(16*1024))
+#endif
+static char out_buf[MI_MAX_DELAY_OUTPUT+1];
+static _Atomic(size_t) out_len;
+
+static void mi_cdecl mi_out_buf(const char* msg, void* arg) {
+ MI_UNUSED(arg);
+ if (msg==NULL) return;
+ if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return;
+ size_t n = _mi_strlen(msg);
+ if (n==0) return;
+ // claim space
+ size_t start = mi_atomic_add_acq_rel(&out_len, n);
+ if (start >= MI_MAX_DELAY_OUTPUT) return;
+ // check bound
+ if (start+n >= MI_MAX_DELAY_OUTPUT) {
+ n = MI_MAX_DELAY_OUTPUT-start-1;
+ }
+ _mi_memcpy(&out_buf[start], msg, n);
+}
+
+static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) {
+ if (out==NULL) return;
+ // claim (if `no_more_buf == true`, no more output will be added after this point)
+ size_t count = mi_atomic_add_acq_rel(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1));
+ // and output the current contents
+ if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT;
+ out_buf[count] = 0;
+ out(out_buf,arg);
+ if (!no_more_buf) {
+ out_buf[count] = '\n'; // if continue with the buffer, insert a newline
+ }
+}
+
+
+// Once this module is loaded, switch to this routine
+// which outputs to stderr and the delayed output buffer.
+static void mi_cdecl mi_out_buf_stderr(const char* msg, void* arg) {
+ mi_out_stderr(msg,arg);
+ mi_out_buf(msg,arg);
+}
+
+
+
+// --------------------------------------------------------
+// Default output handler
+// --------------------------------------------------------
+
+// Should be atomic but gives errors on many platforms as generally we cannot cast a function pointer to a uintptr_t.
+// For now, don't register output from multiple threads.
+static mi_output_fun* volatile mi_out_default; // = NULL
+static _Atomic(void*) mi_out_arg; // = NULL
+
+static mi_output_fun* mi_out_get_default(void** parg) {
+ if (parg != NULL) { *parg = mi_atomic_load_ptr_acquire(void,&mi_out_arg); }
+ mi_output_fun* out = mi_out_default;
+ return (out == NULL ? &mi_out_buf : out);
+}
+
+void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept {
+ mi_out_default = (out == NULL ? &mi_out_stderr : out); // stop using the delayed output buffer
+ mi_atomic_store_ptr_release(void,&mi_out_arg, arg);
+ if (out!=NULL) mi_out_buf_flush(out,true,arg); // output all the delayed output now
+}
+
+// add stderr to the delayed output after the module is loaded
+static void mi_add_stderr_output(void) {
+ mi_assert_internal(mi_out_default == NULL);
+ mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr
+ mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output
+}
+
+// --------------------------------------------------------
+// Messages, all end up calling `_mi_fputs`.
+// --------------------------------------------------------
+static _Atomic(size_t) error_count; // = 0; // when >= max_error_count stop emitting errors
+static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop emitting warnings
+
+// When overriding malloc, we may recurse into mi_vfprintf if an allocation
+// inside the C runtime causes another message.
+// In some cases (like on macOS) the loader already allocates which
+// calls into mimalloc; if we then access thread locals (like `recurse`)
+// this may crash as the access may call _tlv_bootstrap that tries to
+// (recursively) invoke malloc again to allocate space for the thread local
+// variables on demand. This is why we use a _mi_preloading test on such
+// platforms. However, C code generator may move the initial thread local address
+// load before the `if` and we therefore split it out in a separate function.
+static mi_decl_thread bool recurse = false;
+
+static mi_decl_noinline bool mi_recurse_enter_prim(void) {
+ if (recurse) return false;
+ recurse = true;
+ return true;
+}
+
+static mi_decl_noinline void mi_recurse_exit_prim(void) {
+ recurse = false;
+}
+
+static bool mi_recurse_enter(void) {
+ #if defined(__APPLE__) || defined(__ANDROID__) || defined(MI_TLS_RECURSE_GUARD)
+ if (_mi_preloading()) return false;
+ #endif
+ return mi_recurse_enter_prim();
+}
+
+static void mi_recurse_exit(void) {
+ #if defined(__APPLE__) || defined(__ANDROID__) || defined(MI_TLS_RECURSE_GUARD)
+ if (_mi_preloading()) return;
+ #endif
+ mi_recurse_exit_prim();
+}
+
+void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) {
+ if (out==NULL || (void*)out==(void*)stdout || (void*)out==(void*)stderr) { // TODO: use mi_out_stderr for stderr?
+ if (!mi_recurse_enter()) return;
+ out = mi_out_get_default(&arg);
+ if (prefix != NULL) out(prefix, arg);
+ out(message, arg);
+ mi_recurse_exit();
+ }
+ else {
+ if (prefix != NULL) out(prefix, arg);
+ out(message, arg);
+ }
+}
+
+// Define our own limited `fprintf` that avoids memory allocation.
+// We do this using `_mi_vsnprintf` with a limited buffer.
+static void mi_vfprintf( mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args ) {
+ char buf[512];
+ if (fmt==NULL) return;
+ if (!mi_recurse_enter()) return;
+ _mi_vsnprintf(buf, sizeof(buf)-1, fmt, args);
+ mi_recurse_exit();
+ _mi_fputs(out,arg,prefix,buf);
+}
+
+void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) {
+ va_list args;
+ va_start(args,fmt);
+ mi_vfprintf(out,arg,NULL,fmt,args);
+ va_end(args);
+}
+
+static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) {
+ if (prefix != NULL && _mi_strnlen(prefix,33) <= 32 && !_mi_is_main_thread()) {
+ char tprefix[64];
+ _mi_snprintf(tprefix, sizeof(tprefix), "%sthread 0x%tx: ", prefix, (uintptr_t)_mi_thread_id());
+ mi_vfprintf(out, arg, tprefix, fmt, args);
+ }
+ else {
+ mi_vfprintf(out, arg, prefix, fmt, args);
+ }
+}
+
+void _mi_message(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args);
+ va_end(args);
+}
+
+void _mi_trace_message(const char* fmt, ...) {
+ if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher
+ va_list args;
+ va_start(args, fmt);
+ mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args);
+ va_end(args);
+}
+
+void _mi_verbose_message(const char* fmt, ...) {
+ if (!mi_option_is_enabled(mi_option_verbose)) return;
+ va_list args;
+ va_start(args,fmt);
+ mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args);
+ va_end(args);
+}
+
+static void mi_show_error_message(const char* fmt, va_list args) {
+ if (!mi_option_is_enabled(mi_option_verbose)) {
+ if (!mi_option_is_enabled(mi_option_show_errors)) return;
+ if (mi_max_error_count >= 0 && (long)mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
+ }
+ mi_vfprintf_thread(NULL, NULL, "mimalloc: error: ", fmt, args);
+}
+
+void _mi_warning_message(const char* fmt, ...) {
+ if (!mi_option_is_enabled(mi_option_verbose)) {
+ if (!mi_option_is_enabled(mi_option_show_errors)) return;
+ if (mi_max_warning_count >= 0 && (long)mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return;
+ }
+ va_list args;
+ va_start(args,fmt);
+ mi_vfprintf_thread(NULL, NULL, "mimalloc: warning: ", fmt, args);
+ va_end(args);
+}
+
+
+#if MI_DEBUG
+mi_decl_noreturn mi_decl_cold void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) mi_attr_noexcept {
+ _mi_fprintf(NULL, NULL, "mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion);
+ abort();
+}
+#endif
+
+// --------------------------------------------------------
+// Errors
+// --------------------------------------------------------
+
+static mi_error_fun* volatile mi_error_handler; // = NULL
+static _Atomic(void*) mi_error_arg; // = NULL
+
+static void mi_error_default(int err) {
+ MI_UNUSED(err);
+#if (MI_DEBUG>0)
+ if (err==EFAULT) {
+ #ifdef _MSC_VER
+ __debugbreak();
+ #endif
+ abort();
+ }
+#endif
+#if (MI_SECURE>0)
+ if (err==EFAULT) { // abort on serious errors in secure mode (corrupted meta-data)
+ abort();
+ }
+#endif
+#if defined(MI_XMALLOC)
+ if (err==ENOMEM || err==EOVERFLOW) { // abort on memory allocation fails in xmalloc mode
+ abort();
+ }
+#endif
+}
+
+void mi_register_error(mi_error_fun* fun, void* arg) {
+ mi_error_handler = fun; // can be NULL
+ mi_atomic_store_ptr_release(void,&mi_error_arg, arg);
+}
+
+void _mi_error_message(int err, const char* fmt, ...) {
+ // show detailed error message
+ va_list args;
+ va_start(args, fmt);
+ mi_show_error_message(fmt, args);
+ va_end(args);
+ // and call the error handler which may abort (or return normally)
+ if (mi_error_handler != NULL) {
+ mi_error_handler(err, mi_atomic_load_ptr_acquire(void,&mi_error_arg));
+ }
+ else {
+ mi_error_default(err);
+ }
+}
+
+// --------------------------------------------------------
+// Initialize options by checking the environment
+// --------------------------------------------------------
+
+// TODO: implement ourselves to reduce dependencies on the C runtime
+#include <stdlib.h> // strtol
+#include <string.h> // strstr
+
+
+static void mi_option_init(mi_option_desc_t* desc) {
+ // Read option value from the environment
+ char s[64 + 1];
+ char buf[64+1];
+ _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
+ _mi_strlcat(buf, desc->name, sizeof(buf));
+ bool found = _mi_getenv(buf, s, sizeof(s));
+ if (!found && desc->legacy_name != NULL) {
+ _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
+ _mi_strlcat(buf, desc->legacy_name, sizeof(buf));
+ found = _mi_getenv(buf, s, sizeof(s));
+ if (found) {
+ _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name);
+ }
+ }
+
+ if (found) {
+ size_t len = _mi_strnlen(s, sizeof(buf) - 1);
+ for (size_t i = 0; i < len; i++) {
+ buf[i] = _mi_toupper(s[i]);
+ }
+ buf[len] = 0;
+ if (buf[0] == 0 || strstr("1;TRUE;YES;ON", buf) != NULL) {
+ desc->value = 1;
+ desc->init = INITIALIZED;
+ }
+ else if (strstr("0;FALSE;NO;OFF", buf) != NULL) {
+ desc->value = 0;
+ desc->init = INITIALIZED;
+ }
+ else {
+ char* end = buf;
+ long value = strtol(buf, &end, 10);
+ if (mi_option_has_size_in_kib(desc->option)) {
+ // this option is interpreted in KiB to prevent overflow of `long` for large allocations
+ // (long is 32-bit on 64-bit windows, which allows for 4TiB max.)
+ size_t size = (value < 0 ? 0 : (size_t)value);
+ bool overflow = false;
+ if (*end == 'K') { end++; }
+ else if (*end == 'M') { overflow = mi_mul_overflow(size,MI_KiB,&size); end++; }
+ else if (*end == 'G') { overflow = mi_mul_overflow(size,MI_MiB,&size); end++; }
+ else if (*end == 'T') { overflow = mi_mul_overflow(size,MI_GiB,&size); end++; }
+ else { size = (size + MI_KiB - 1) / MI_KiB; }
+ if (end[0] == 'I' && end[1] == 'B') { end += 2; } // KiB, MiB, GiB, TiB
+ else if (*end == 'B') { end++; } // Kb, Mb, Gb, Tb
+ if (overflow || size > MI_MAX_ALLOC_SIZE) { size = (MI_MAX_ALLOC_SIZE / MI_KiB); }
+ value = (size > LONG_MAX ? LONG_MAX : (long)size);
+ }
+ if (*end == 0) {
+ mi_option_set(desc->option, value);
+ }
+ else {
+ // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose.
+ desc->init = DEFAULTED;
+ if (desc->option == mi_option_verbose && desc->value == 0) {
+ // if the 'mimalloc_verbose' env var has a bogus value we'd never know
+ // (since the value defaults to 'off') so in that case briefly enable verbose
+ desc->value = 1;
+ _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name);
+ desc->value = 0;
+ }
+ else {
+ _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name);
+ }
+ }
+ }
+ mi_assert_internal(desc->init != UNINIT);
+ }
+ else if (!_mi_preloading()) {
+ desc->init = DEFAULTED;
+ }
+}
diff --git a/compat/mimalloc/os.c b/compat/mimalloc/os.c
new file mode 100644
index 00000000000000..241d6a2bee3487
--- /dev/null
+++ b/compat/mimalloc/os.c
@@ -0,0 +1,770 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
+
+// OS-layer statistics are accounted in the global `_mi_stats_main` record
+// (os.c has no per-thread/heap context to attribute them to).
+#define mi_os_stat_increase(stat,amount) _mi_stat_increase(&_mi_stats_main.stat, amount)
+#define mi_os_stat_decrease(stat,amount) _mi_stat_decrease(&_mi_stats_main.stat, amount)
+#define mi_os_stat_counter_increase(stat,inc) _mi_stat_counter_increase(&_mi_stats_main.stat, inc)
+
+/* -----------------------------------------------------------
+ Initialization.
+----------------------------------------------------------- */
+// Compile-time defaults, used until `_mi_prim_mem_init` supplies real platform values.
+#ifndef MI_DEFAULT_VIRTUAL_ADDRESS_BITS
+#if MI_INTPTR_SIZE < 8
+#define MI_DEFAULT_VIRTUAL_ADDRESS_BITS 32
+#else
+#define MI_DEFAULT_VIRTUAL_ADDRESS_BITS 48
+#endif
+#endif
+
+#ifndef MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB
+#if MI_INTPTR_SIZE < 8
+#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 4*MI_MiB // 4 GiB
+#else
+#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 32*MI_MiB // 32 GiB
+#endif
+#endif
+
+// Platform memory configuration; overwritten by `_mi_prim_mem_init` from `_mi_os_init`.
+static mi_os_mem_config_t mi_os_mem_config = {
+ 4096, // page size
+ 0, // large page size (usually 2MiB)
+ 4096, // allocation granularity
+ MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB,
+ MI_DEFAULT_VIRTUAL_ADDRESS_BITS,
+ true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
+ false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
+ true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
+};
+
+// True if the OS overcommits memory (per `mi_os_mem_config`, set by `_mi_prim_mem_init`).
+bool _mi_os_has_overcommit(void) {
+ return mi_os_mem_config.has_overcommit;
+}
+
+// True if virtual address space can be reserved without committing physical memory.
+bool _mi_os_has_virtual_reserve(void) {
+ return mi_os_mem_config.has_virtual_reserve;
+}
+
+
+// OS (small) page size
+size_t _mi_os_page_size(void) {
+ return mi_os_mem_config.page_size;
+}
+
+// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
+size_t _mi_os_large_page_size(void) {
+ return (mi_os_mem_config.large_page_size != 0 ? mi_os_mem_config.large_page_size : _mi_os_page_size());
+}
+
+// Can an allocation of `size` bytes at `alignment` be served with large OS pages?
+// Requires both to be multiples of the large page size.
+bool _mi_os_canuse_large_page(size_t size, size_t alignment) {
+ // if we have access, check the size and alignment requirements
+ if (mi_os_mem_config.large_page_size == 0) return false;
+ return ((size % mi_os_mem_config.large_page_size) == 0 && (alignment % mi_os_mem_config.large_page_size) == 0);
+}
+
+// round to a good OS allocation size (bounded by max 12.5% waste)
+size_t _mi_os_good_alloc_size(size_t size) {
+ size_t align_size;
+ if (size < 512*MI_KiB) align_size = _mi_os_page_size();
+ else if (size < 2*MI_MiB) align_size = 64*MI_KiB;
+ else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
+ else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
+ else align_size = 4*MI_MiB;
+ if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow?
+ return _mi_align_up(size, align_size);
+}
+
+// Initialize the OS layer: ask the platform primitives to fill in `mi_os_mem_config`.
+void _mi_os_init(void) {
+ _mi_prim_mem_init(&mi_os_mem_config);
+}
+
+
+/* -----------------------------------------------------------
+ Util
+-------------------------------------------------------------- */
+// forward declarations (both are defined later in this file)
+bool _mi_os_decommit(void* addr, size_t size);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero);
+
+
+/* -----------------------------------------------------------
+ aligned hinting
+-------------------------------------------------------------- */
+
+// On systems with enough virtual address bits, we can do efficient aligned allocation by using
+// the 2TiB to 30TiB area to allocate those. If we have at least 46 bits of virtual address
+// space (64TiB) we use this technique. (but see issue #939)
+#if (MI_INTPTR_SIZE >= 8) && !defined(MI_NO_ALIGNED_HINT)
+// Running counter for the next hint address; advanced atomically per request.
+static mi_decl_cache_align _Atomic(uintptr_t)aligned_base;
+
+// Return a MI_SEGMENT_SIZE aligned address that is probably available.
+// If this returns NULL, the OS will determine the address but on some OS's that may not be
+// properly aligned which can be more costly as it needs to be adjusted afterwards.
+// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization;
+// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses
+// in the middle of the 2TiB - 6TiB address range (see issue #372))
+
+#define MI_HINT_BASE ((uintptr_t)2 << 40) // 2TiB start
+#define MI_HINT_AREA ((uintptr_t)4 << 40) // up to 6TiB (since before win8 there is "only" 8TiB available to processes)
+#define MI_HINT_MAX ((uintptr_t)30 << 40) // wrap after 30TiB (area after 32TiB is used for huge OS pages)
+
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size)
+{
+ if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL;
+ if (mi_os_mem_config.virtual_address_bits < 46) return NULL; // < 64TiB virtual address space
+ size = _mi_align_up(size, MI_SEGMENT_SIZE);
+ if (size > 1*MI_GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096.
+ #if (MI_SECURE>0)
+ size += MI_SEGMENT_SIZE; // put in `MI_SEGMENT_SIZE` virtual gaps between hinted blocks; this splits VLA's but increases guarded areas.
+ #endif
+
+ // bump the shared counter; wrap (or initialize) when past MI_HINT_MAX
+ uintptr_t hint = mi_atomic_add_acq_rel(&aligned_base, size);
+ if (hint == 0 || hint > MI_HINT_MAX) { // wrap or initialize
+ uintptr_t init = MI_HINT_BASE;
+ #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode
+ uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
+ init = init + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA); // (randomly 20 bits)*4MiB == 0 to 4TiB
+ #endif
+ uintptr_t expected = hint + size;
+ mi_atomic_cas_strong_acq_rel(&aligned_base, &expected, init);
+ hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > MI_HINT_MAX but that is ok, it is a hint after all
+ }
+ if (hint%try_alignment != 0) return NULL;
+ return (void*)hint;
+}
+#else
+// No aligned hinting on small address spaces (or when explicitly disabled).
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
+ MI_UNUSED(try_alignment); MI_UNUSED(size);
+ return NULL;
+}
+#endif
+
+/* -----------------------------------------------------------
+ Free memory
+-------------------------------------------------------------- */
+
+static void mi_os_free_huge_os_pages(void* p, size_t size);
+
+// Free `size` bytes at `addr` via the OS primitive and update statistics.
+// `commit_size` is the still-committed portion to subtract from the `committed` stat;
+// a failure to free is reported as a warning but not fatal.
+static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) {
+ mi_assert_internal((size % _mi_os_page_size()) == 0);
+ if (addr == NULL) return; // || _mi_os_is_huge_reserved(addr)
+ int err = _mi_prim_free(addr, size); // allow size==0 (issue #1041)
+ if (err != 0) {
+ _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
+ }
+ if (commit_size > 0) {
+ mi_os_stat_decrease(committed, commit_size);
+ }
+ mi_os_stat_decrease(reserved, size);
+}
+
+// Free OS-backed memory described by `memid`. `still_committed` says whether the
+// range still counts against the `committed` statistic. Handles the case where the
+// actual OS base pointer differs from `addr` due to alignment over-allocation.
+void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
+ if (mi_memkind_is_os(memid.memkind)) {
+ size_t csize = memid.mem.os.size;
+ if (csize==0) { csize = _mi_os_good_alloc_size(size); }
+ mi_assert_internal(csize >= size);
+ size_t commit_size = (still_committed ? csize : 0);
+ void* base = addr;
+ // different base? (due to alignment)
+ if (memid.mem.os.base != base) {
+ mi_assert(memid.mem.os.base <= addr);
+ base = memid.mem.os.base;
+ const size_t diff = (uint8_t*)addr - (uint8_t*)memid.mem.os.base;
+ if (memid.mem.os.size==0) {
+ csize += diff;
+ }
+ if (still_committed) {
+ commit_size -= diff; // the (addr-base) part was already un-committed
+ }
+ }
+ // free it
+ if (memid.memkind == MI_MEM_OS_HUGE) {
+ mi_assert(memid.is_pinned);
+ mi_os_free_huge_os_pages(base, csize);
+ }
+ else {
+ mi_os_prim_free(base, csize, (still_committed ? commit_size : 0));
+ }
+ }
+ else {
+ // nothing to do
+ mi_assert(memid.memkind < MI_MEM_OS);
+ }
+}
+
+// Convenience wrapper: free assuming the memory is still fully committed.
+void _mi_os_free(void* p, size_t size, mi_memid_t memid) {
+ _mi_os_free_ex(p, size, true, memid);
+}
+
+
+/* -----------------------------------------------------------
+ Primitive allocation from the OS.
+-------------------------------------------------------------- */
+
+// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+// Also `hint_addr` is a hint and may be ignored.
+// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+// Also `hint_addr` is a hint and may be ignored.
+// On success updates the `reserved` (and, when committing, `committed`) statistics;
+// `*is_large` / `*is_zero` are set by the primitive.
+static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(is_zero != NULL);
+ mi_assert_internal(is_large != NULL);
+ if (size == 0) return NULL;
+ if (!commit) { allow_large = false; }
+ if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning
+ *is_zero = false;
+ void* p = NULL;
+ int err = _mi_prim_alloc(hint_addr, size, try_alignment, commit, allow_large, is_large, is_zero, &p);
+ if (err != 0) {
+ _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), addr: %p, size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, hint_addr, size, try_alignment, commit, allow_large);
+ }
+
+
+
+ // count every attempt (even failed ones reach here with p == NULL)
+ mi_os_stat_counter_increase(mmap_calls, 1);
+ if (p != NULL) {
+ mi_os_stat_increase(reserved, size);
+ if (commit) {
+ mi_os_stat_increase(committed, size);
+ // seems needed for asan (or `mimalloc-test-api` fails)
+ #ifdef MI_TRACK_ASAN
+ if (*is_zero) { mi_track_mem_defined(p,size); }
+ else { mi_track_mem_undefined(p,size); }
+ #endif
+ }
+ }
+ return p;
+}
+
+// As `mi_os_prim_alloc_at` but without an address hint.
+static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
+ return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero);
+}
+
+
+// Primitive aligned allocation from the OS.
+// This function guarantees the allocated memory is aligned.
+static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base) {
+ mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(is_large != NULL);
+ mi_assert_internal(is_zero != NULL);
+ mi_assert_internal(base != NULL);
+ if (!commit) allow_large = false;
+ if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
+ size = _mi_align_up(size, _mi_os_page_size());
+
+ // try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
+ void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero);
+ if (p == NULL) return NULL;
+
+ // aligned already?
+ if (((uintptr_t)p % alignment) == 0) {
+ *base = p;
+ }
+ else {
+ // if not aligned, free it, overallocate, and unmap around it
+ #if !MI_TRACK_ASAN
+ _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
+ #endif
+ if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0)); }
+ if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
+ const size_t over_size = size + alignment;
+
+ if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block
+ // over-allocate uncommitted (virtual) memory
+ p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero);
+ if (p == NULL) return NULL;
+
+ // set p to the aligned part in the full region
+ // note: this is dangerous on Windows as VirtualFree needs the actual base pointer
+ // this is handled though by having the `base` field in the memid's
+ *base = p; // remember the base
+ p = mi_align_up_ptr(p, alignment);
+
+ // explicitly commit only the aligned part
+ if (commit) {
+ if (!_mi_os_commit(p, size, NULL)) {
+ mi_os_prim_free(*base, over_size, 0);
+ return NULL;
+ }
+ }
+ }
+ else { // mmap can free inside an allocation
+ // overallocate...
+ p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero);
+ if (p == NULL) return NULL;
+
+ // and selectively unmap parts around the over-allocated area.
+ void* aligned_p = mi_align_up_ptr(p, alignment);
+ size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
+ size_t mid_size = _mi_align_up(size, _mi_os_page_size());
+ size_t post_size = over_size - pre_size - mid_size;
+ mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size);
+ if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0)); }
+ if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0)); }
+ // we can return the aligned pointer on `mmap` systems
+ p = aligned_p;
+ *base = aligned_p; // since we freed the pre part, `*base == p`.
+ }
+ }
+
+ mi_assert_internal(p == NULL || (p != NULL && *base != NULL && ((uintptr_t)p % alignment) == 0));
+ return p;
+}
+
+
+/* -----------------------------------------------------------
+ OS API: alloc and alloc_aligned
+----------------------------------------------------------- */
+
+// Allocate `size` bytes (rounded up to a good OS size), committed, not large-page.
+// Fills `*memid` with the allocation provenance for later `_mi_os_free`.
+void* _mi_os_alloc(size_t size, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
+ if (size == 0) return NULL;
+ size = _mi_os_good_alloc_size(size);
+ bool os_is_large = false;
+ bool os_is_zero = false;
+ void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero);
+ if (p == NULL) return NULL;
+
+ *memid = _mi_memid_create_os(p, size, true, os_is_zero, os_is_large);
+ mi_assert_internal(memid->mem.os.size >= size);
+ mi_assert_internal(memid->initially_committed);
+ return p;
+}
+
+// Allocate `size` bytes aligned to `alignment` (rounded up to at least a page).
+// `memid->mem.os.base/size` record the true OS region so the whole span can be freed.
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid)
+{
+ MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
+ *memid = _mi_memid_none();
+ if (size == 0) return NULL;
+ size = _mi_os_good_alloc_size(size);
+ alignment = _mi_align_up(alignment, _mi_os_page_size());
+
+ bool os_is_large = false;
+ bool os_is_zero = false;
+ void* os_base = NULL;
+ void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base );
+ if (p == NULL) return NULL;
+
+ *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large);
+ memid->mem.os.base = os_base;
+ memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned?
+
+ mi_assert_internal(memid->mem.os.size >= size);
+ mi_assert_internal(_mi_is_aligned(p,alignment));
+ if (commit) { mi_assert_internal(memid->initially_committed); }
+ return p;
+}
+
+
+// Ensure `p` is committed and zero-filled; frees `p` and returns NULL if the
+// commit fails. Updates `memid` flags to reflect the resulting state.
+mi_decl_nodiscard static void* mi_os_ensure_zero(void* p, size_t size, mi_memid_t* memid) {
+ if (p==NULL || size==0) return p;
+ // ensure committed
+ if (!memid->initially_committed) {
+ bool is_zero = false;
+ if (!_mi_os_commit(p, size, &is_zero)) {
+ _mi_os_free(p, size, *memid);
+ return NULL;
+ }
+ memid->initially_committed = true;
+ }
+ // ensure zero'd
+ if (memid->initially_zero) return p;
+ _mi_memzero_aligned(p,size);
+ memid->initially_zero = true;
+ return p;
+}
+
+// Allocate zero-initialized OS memory (may avoid an explicit memset if the OS
+// already returned zeroed pages).
+void* _mi_os_zalloc(size_t size, mi_memid_t* memid) {
+ void* p = _mi_os_alloc(size,memid);
+ return mi_os_ensure_zero(p, size, memid);
+}
+
+/* -----------------------------------------------------------
+ OS aligned allocation with an offset. This is used
+ for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc
+ page where the object can be aligned at an offset from the start of the segment.
+ As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
+ to use the actual start of the memory region.
+----------------------------------------------------------- */
+
+// Allocate memory such that `p + offset` is aligned to `alignment`.
+// With offset == 0 this is a plain aligned allocation; otherwise we over-allocate
+// by `extra` bytes and decommit the unused prefix when possible.
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid) {
+ mi_assert(offset <= MI_SEGMENT_SIZE);
+ mi_assert(offset <= size);
+ mi_assert((alignment % _mi_os_page_size()) == 0);
+ *memid = _mi_memid_none();
+ if (offset > MI_SEGMENT_SIZE) return NULL;
+ if (offset == 0) {
+ // regular aligned allocation
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
+ }
+ else {
+ // overallocate to align at an offset
+ const size_t extra = _mi_align_up(offset, alignment) - offset;
+ const size_t oversize = size + extra;
+ void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid);
+ if (start == NULL) return NULL;
+
+ void* const p = (uint8_t*)start + extra;
+ mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
+ // decommit the overallocation at the start
+ if (commit && extra > _mi_os_page_size()) {
+ _mi_os_decommit(start, extra);
+ }
+ return p;
+ }
+}
+
+/* -----------------------------------------------------------
+ OS memory API: reset, commit, decommit, protect, unprotect.
+----------------------------------------------------------- */
+
+// OS page align within a given area, either conservative (pages inside the area only),
+// or not (straddling pages outside the area is possible)
+// OS page align within a given area, either conservative (pages inside the area only),
+// or not (straddling pages outside the area is possible)
+// Returns the aligned start (NULL if the aligned range is empty) and writes the
+// aligned byte count to `*newsize` when non-NULL.
+static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) {
+ mi_assert(addr != NULL && size > 0);
+ if (newsize != NULL) *newsize = 0;
+ if (size == 0 || addr == NULL) return NULL;
+
+ // page align conservatively within the range
+ void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size())
+ : mi_align_down_ptr(addr, _mi_os_page_size()));
+ void* end = (conservative ? mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size())
+ : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size()));
+ ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start;
+ if (diff <= 0) return NULL;
+
+ mi_assert_internal((conservative && (size_t)diff <= size) || (!conservative && (size_t)diff >= size));
+ if (newsize != NULL) *newsize = (size_t)diff;
+ return start;
+}
+
+// Conservative variant: only pages fully inside [addr, addr+size).
+static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* newsize) {
+ return mi_os_page_align_areax(true, addr, size, newsize);
+}
+
+// Commit the (page-aligned, non-conservative) range; on success sets `*is_zero`
+// when the OS guarantees zeroed pages. `stat_size` is the amount added to the
+// `committed` statistic (may differ from `size` for precise commit/decommit pairing).
+bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size) {
+ if (is_zero != NULL) { *is_zero = false; }
+ mi_os_stat_counter_increase(commit_calls, 1);
+
+ // page align range
+ size_t csize;
+ void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize);
+ if (csize == 0) return true;
+
+ // commit
+ bool os_is_zero = false;
+ int err = _mi_prim_commit(start, csize, &os_is_zero);
+ if (err != 0) {
+ _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
+ return false;
+ }
+ if (os_is_zero && is_zero != NULL) {
+ *is_zero = true;
+ mi_assert_expensive(mi_mem_is_zero(start, csize));
+ }
+ // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
+ #ifdef MI_TRACK_ASAN
+ if (os_is_zero) { mi_track_mem_defined(start,csize); }
+ else { mi_track_mem_undefined(start,csize); }
+ #endif
+ mi_os_stat_increase(committed, stat_size); // use size for precise commit vs. decommit
+ return true;
+}
+
+// Commit with `stat_size == size`.
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
+ return _mi_os_commit_ex(addr, size, is_zero, size);
+}
+
+// Decommit the conservative page-aligned sub-range. `*needs_recommit` is set by
+// the primitive (true when the memory must be recommitted before reuse).
+static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, size_t stat_size) {
+ mi_assert_internal(needs_recommit!=NULL);
+ mi_os_stat_decrease(committed, stat_size);
+
+ // page align
+ size_t csize;
+ void* start = mi_os_page_align_area_conservative(addr, size, &csize);
+ if (csize == 0) return true;
+
+ // decommit
+ *needs_recommit = true;
+ int err = _mi_prim_decommit(start,csize,needs_recommit);
+ if (err != 0) {
+ _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
+ }
+ mi_assert_internal(err == 0);
+ return (err == 0);
+}
+
+// Decommit, discarding the `needs_recommit` result.
+bool _mi_os_decommit(void* addr, size_t size) {
+ bool needs_recommit;
+ return mi_os_decommit_ex(addr, size, &needs_recommit, size);
+}
+
+
+// Signal to the OS that the address range is no longer in use
+// but may be used later again. This will release physical memory
+// pages and reduce swapping while keeping the memory committed.
+// We page align to a conservative area inside the range to reset.
+// Signal to the OS that the address range is no longer in use
+// but may be used later again. This will release physical memory
+// pages and reduce swapping while keeping the memory committed.
+// We page align to a conservative area inside the range to reset.
+bool _mi_os_reset(void* addr, size_t size) {
+ // page align conservatively within the range
+ size_t csize;
+ void* start = mi_os_page_align_area_conservative(addr, size, &csize);
+ if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr)
+ mi_os_stat_counter_increase(reset, csize);
+ mi_os_stat_counter_increase(reset_calls, 1);
+
+ #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN
+ memset(start, 0, csize); // pretend it is eagerly reset
+ #endif
+
+ int err = _mi_prim_reset(start, csize);
+ if (err != 0) {
+ _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
+ }
+ return (err == 0);
+}
+
+
+// Signal to the OS that a previously reset range is about to be used again.
+void _mi_os_reuse( void* addr, size_t size ) {
+ // page align conservatively within the range
+ size_t csize = 0;
+ void* const start = mi_os_page_align_area_conservative(addr, size, &csize);
+ if (csize == 0) return;
+ const int err = _mi_prim_reuse(start, csize);
+ if (err != 0) {
+ _mi_warning_message("cannot reuse OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
+ }
+}
+
+// either resets or decommits memory, returns true if the memory needs
+// to be recommitted if it is to be re-used later on.
+// Purge a range: decommit when `mi_option_purge_decommits` is set (and not
+// preloading), otherwise reset. Returns true iff the memory needs recommit
+// before reuse. Purging is disabled entirely when purge_delay < 0.
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size)
+{
+ if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed?
+ mi_os_stat_counter_increase(purge_calls, 1);
+ mi_os_stat_counter_increase(purged, size);
+
+ if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
+ !_mi_preloading()) // don't decommit during preloading (unsafe)
+ {
+ bool needs_recommit = true;
+ mi_os_decommit_ex(p, size, &needs_recommit, stat_size);
+ return needs_recommit;
+ }
+ else {
+ if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed
+ _mi_os_reset(p, size);
+ }
+ return false; // needs no recommit
+ }
+}
+
+// either resets or decommits memory, returns true if the memory needs
+// to be recommitted if it is to be re-used later on.
+bool _mi_os_purge(void* p, size_t size) {
+ return _mi_os_purge_ex(p, size, true, size);
+}
+
+// Protect a region in memory to be not accessible.
+// Protect a region in memory to be not accessible.
+// Shared implementation for protect/unprotect; operates on the conservative
+// page-aligned sub-range and returns true on success.
+static bool mi_os_protectx(void* addr, size_t size, bool protect) {
+ // page align conservatively within the range
+ size_t csize = 0;
+ void* start = mi_os_page_align_area_conservative(addr, size, &csize);
+ if (csize == 0) return false;
+ /*
+ if (_mi_os_is_huge_reserved(addr)) {
+ _mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
+ }
+ */
+ int err = _mi_prim_protect(start,csize,protect);
+ if (err != 0) {
+ _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? "protect" : "unprotect"), err, err, start, csize);
+ }
+ return (err == 0);
+}
+
+// Make the range inaccessible (used for guard pages in secure mode).
+bool _mi_os_protect(void* addr, size_t size) {
+ return mi_os_protectx(addr, size, true);
+}
+
+// Restore normal access to a previously protected range.
+bool _mi_os_unprotect(void* addr, size_t size) {
+ return mi_os_protectx(addr, size, false);
+}
+
+
+
+/* ----------------------------------------------------------------------------
+Support for allocating huge OS pages (1GiB) that are reserved up-front
+and possibly associated with a specific NUMA node. (use `numa_node>=0`)
+-----------------------------------------------------------------------------*/
+#define MI_HUGE_OS_PAGE_SIZE (MI_GiB)
+
+
+#if (MI_INTPTR_SIZE >= 8)
+// To ensure proper alignment, use our own area for huge OS pages
+static mi_decl_cache_align _Atomic(uintptr_t) mi_huge_start; // = 0
+
+// Claim an aligned address range for huge pages
+// Atomically advances `mi_huge_start` by `pages * MI_HUGE_OS_PAGE_SIZE` and
+// returns the claimed start address (total bytes in `*total_size`).
+static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
+ if (total_size != NULL) *total_size = 0;
+ const size_t size = pages * MI_HUGE_OS_PAGE_SIZE;
+
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uintptr_t huge_start = mi_atomic_load_relaxed(&mi_huge_start);
+ do {
+ start = huge_start;
+ if (start == 0) {
+ // Initialize the start address after the 32TiB area
+ start = ((uintptr_t)32 << 40); // 32TiB virtual start address
+ #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode
+ uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
+ start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF)); // (randomly 12bits)*1GiB == between 0 to 4TiB
+ #endif
+ }
+ end = start + size;
+ mi_assert_internal(end % MI_SEGMENT_SIZE == 0);
+ } while (!mi_atomic_cas_strong_acq_rel(&mi_huge_start, &huge_start, end));
+
+ if (total_size != NULL) *total_size = size;
+ return (uint8_t*)start;
+}
+#else
+// 32-bit: no dedicated huge-page address area available.
+static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
+ MI_UNUSED(pages);
+ if (total_size != NULL) *total_size = 0;
+ return NULL;
+}
+#endif
+
+// Allocate MI_SEGMENT_SIZE aligned huge pages
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
+ if (psize != NULL) *psize = 0;
+ if (pages_reserved != NULL) *pages_reserved = 0;
+ size_t size = 0;
+ uint8_t* const start = mi_os_claim_huge_pages(pages, &size);
+ if (start == NULL) return NULL; // or 32-bit systems
+
+ // Allocate one page at the time but try to place them contiguously
+ // We allocate one page at the time to be able to abort if it takes too long
+ // or to at least allocate as many as available on the system.
+ mi_msecs_t start_t = _mi_clock_start();
+ size_t page = 0;
+ bool all_zero = true;
+ while (page < pages) {
+ // allocate a page
+ bool is_zero = false;
+ void* addr = start + (page * MI_HUGE_OS_PAGE_SIZE);
+ void* p = NULL;
+ int err = _mi_prim_alloc_huge_os_pages(addr, MI_HUGE_OS_PAGE_SIZE, numa_node, &is_zero, &p);
+ if (!is_zero) { all_zero = false; }
+ if (err != 0) {
+ _mi_warning_message("unable to allocate huge OS page (error: %d (0x%x), address: %p, size: %zx bytes)\n", err, err, addr, MI_HUGE_OS_PAGE_SIZE);
+ break;
+ }
+
+ // Did we succeed at a contiguous address?
+ if (p != addr) {
+ // no success, issue a warning and break
+ if (p != NULL) {
+ _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
+ mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
+ }
+ break;
+ }
+
+ // success, record it
+ page++; // increase before timeout check (see issue #711)
+ mi_os_stat_increase(committed, MI_HUGE_OS_PAGE_SIZE);
+ mi_os_stat_increase(reserved, MI_HUGE_OS_PAGE_SIZE);
+
+ // check for timeout
+ if (max_msecs > 0) {
+ mi_msecs_t elapsed = _mi_clock_end(start_t);
+ if (page >= 1) {
+ mi_msecs_t estimate = ((elapsed / (page+1)) * pages);
+ if (estimate > 2*max_msecs) { // seems like we are going to timeout, break
+ elapsed = max_msecs + 1;
+ }
+ }
+ if (elapsed > max_msecs) {
+ _mi_warning_message("huge OS page allocation timed out (after allocating %zu page(s))\n", page);
+ break;
+ }
+ }
+ }
+ mi_assert_internal(page*MI_HUGE_OS_PAGE_SIZE <= size);
+ if (pages_reserved != NULL) { *pages_reserved = page; }
+ if (psize != NULL) { *psize = page * MI_HUGE_OS_PAGE_SIZE; }
+ if (page != 0) {
+ mi_assert(start != NULL);
+ *memid = _mi_memid_create_os(start, size, true /* is committed */, all_zero, true /* is_large */);
+ memid->memkind = MI_MEM_OS_HUGE;
+ mi_assert(memid->is_pinned);
+ #ifdef MI_TRACK_ASAN
+ if (all_zero) { mi_track_mem_defined(start,size); }
+ #endif
+ }
+ return (page == 0 ? NULL : start);
+}
+
+// free every huge page in a range individually (as we allocated per page)
+// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
+// free every huge page in a range individually (as we allocated per page)
+// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
+static void mi_os_free_huge_os_pages(void* p, size_t size) {
+ if (p==NULL || size==0) return;
+ uint8_t* base = (uint8_t*)p;
+ while (size >= MI_HUGE_OS_PAGE_SIZE) {
+ mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
+ size -= MI_HUGE_OS_PAGE_SIZE;
+ base += MI_HUGE_OS_PAGE_SIZE;
+ }
+}
+
+
+/* ----------------------------------------------------------------------------
+Support NUMA aware allocation
+-----------------------------------------------------------------------------*/
+
+static _Atomic(size_t) mi_numa_node_count; // = 0 // cache the node count
+
+// Number of NUMA nodes: from `mi_option_use_numa_nodes` when given, otherwise
+// detected via the primitives; cached in `mi_numa_node_count` after first call.
+int _mi_os_numa_node_count(void) {
+ size_t count = mi_atomic_load_acquire(&mi_numa_node_count);
+ if mi_unlikely(count == 0) {
+ long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
+ if (ncount > 0 && ncount < INT_MAX) {
+ count = (size_t)ncount;
+ }
+ else {
+ const size_t n = _mi_prim_numa_node_count(); // or detect dynamically
+ if (n == 0 || n > INT_MAX) { count = 1; }
+ else { count = n; }
+ }
+ mi_atomic_store_release(&mi_numa_node_count, count); // save it
+ _mi_verbose_message("using %zd numa regions\n", count);
+ }
+ mi_assert_internal(count > 0 && count <= INT_MAX);
+ return (int)count;
+}
+
+// Current NUMA node of the calling thread, clamped to [0, node_count).
+static int mi_os_numa_node_get(void) {
+ int numa_count = _mi_os_numa_node_count();
+ if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
+ // never more than the node count and >= 0
+ const size_t n = _mi_prim_numa_node();
+ int numa_node = (n < INT_MAX ? (int)n : 0);
+ if (numa_node >= numa_count) { numa_node = numa_node % numa_count; }
+ return numa_node;
+}
+
+// Fast path for the common single-node case; otherwise query the OS.
+int _mi_os_numa_node(void) {
+ if mi_likely(mi_atomic_load_relaxed(&mi_numa_node_count) == 1) {
+ return 0;
+ }
+ else {
+ return mi_os_numa_node_get();
+ }
+}
diff --git a/compat/mimalloc/page-queue.c b/compat/mimalloc/page-queue.c
new file mode 100644
index 00000000000000..1f700c6df4c866
--- /dev/null
+++ b/compat/mimalloc/page-queue.c
@@ -0,0 +1,397 @@
+/*----------------------------------------------------------------------------
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* -----------------------------------------------------------
+ Definition of page queues for each block size
+----------------------------------------------------------- */
+
+#ifndef MI_IN_PAGE_C
+#error "this file should be included from 'page.c'"
+// include to help an IDE
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#endif
+
+/* -----------------------------------------------------------
+ Minimal alignment in machine words (i.e. `sizeof(void*)`)
+----------------------------------------------------------- */
+
+#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
+ #error "define alignment for more than 4x word size for this platform"
+#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
+ #define MI_ALIGN4W // 4 machine words minimal alignment
+#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
+ #define MI_ALIGN2W // 2 machine words minimal alignment
+#else
+ // ok, default alignment is 1 word
+#endif
+
+
+/* -----------------------------------------------------------
+ Queue query
+----------------------------------------------------------- */
+
+
+static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
+ return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t))); // the huge queue is marked by this sentinel block size
+}
+
+static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
+ return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t)))); // the full queue is marked by this sentinel block size
+}
+
+static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
+ return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX); // either the huge or the full queue
+}
+
+/* -----------------------------------------------------------
+ Bins
+----------------------------------------------------------- */
+
+// Return the bin for a given field size.
+// Returns MI_BIN_HUGE if the size is too large.
+// We use `wsize` for the size in "machine word sizes",
+// i.e. byte size == `wsize*sizeof(void*)`.
+static inline size_t mi_bin(size_t size) {
+ size_t wsize = _mi_wsize_from_size(size);
+#if defined(MI_ALIGN4W)
+ if mi_likely(wsize <= 4) {
+ return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes
+ }
+#elif defined(MI_ALIGN2W)
+ if mi_likely(wsize <= 8) {
+ return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes
+ }
+#else
+ if mi_likely(wsize <= 8) {
+ return (wsize == 0 ? 1 : wsize); // one exact bin per word size for the smallest sizes
+ }
+#endif
+ else if mi_unlikely(wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
+ return MI_BIN_HUGE;
+ }
+ else {
+ #if defined(MI_ALIGN4W)
+ if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
+ #endif
+ wsize--;
+ // find the highest bit
+ const size_t b = (MI_SIZE_BITS - 1 - mi_clz(wsize)); // note: wsize != 0
+ // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
+ // - adjust with 3 because we do not round the first 8 sizes
+ // which each get an exact bin
+ const size_t bin = ((b << 2) + ((wsize >> (b - 2)) & 0x03)) - 3;
+ mi_assert_internal(bin > 0 && bin < MI_BIN_HUGE);
+ return bin;
+ }
+}
+
+
+
+/* -----------------------------------------------------------
+ Queue of pages with free blocks
+----------------------------------------------------------- */
+
+size_t _mi_bin(size_t size) { // exported wrapper around the inline `mi_bin`
+ return mi_bin(size);
+}
+
+size_t _mi_bin_size(size_t bin) { // the (maximum) block size served by bin `bin`
+ return _mi_heap_empty.pages[bin].block_size;
+}
+
+// Good size for allocation
+size_t mi_good_size(size_t size) mi_attr_noexcept {
+ if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
+ return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE)); // round up to the bin's block size
+ }
+ else {
+ return _mi_align_up(size + MI_PADDING_SIZE,_mi_os_page_size()); // large/huge: round up to an OS page
+ }
+}
+
+#if (MI_DEBUG>1)
+static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) { // debug-only: linear scan with link-consistency checks
+ mi_assert_internal(page != NULL);
+ mi_page_t* list = queue->first;
+ while (list != NULL) {
+ mi_assert_internal(list->next == NULL || list->next->prev == list);
+ mi_assert_internal(list->prev == NULL || list->prev->next == list);
+ if (list == page) break;
+ list = list->next;
+ }
+ return (list == page);
+}
+
+#endif
+
+#if (MI_DEBUG>1)
+static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) { // debug-only: does `pq` point into `heap->pages`?
+ return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
+}
+#endif
+
+static inline bool mi_page_is_large_or_huge(const mi_page_t* page) {
+ return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page));
+}
+
+static size_t mi_page_bin(const mi_page_t* page) { // bin index of the queue `page` belongs to (full pages map to MI_BIN_FULL)
+ const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
+ mi_assert_internal(bin <= MI_BIN_FULL);
+ return bin;
+}
+
+// returns the page bin without using MI_BIN_FULL for statistics
+size_t _mi_page_stats_bin(const mi_page_t* page) {
+ const size_t bin = (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)));
+ mi_assert_internal(bin <= MI_BIN_HUGE);
+ return bin;
+}
+
+static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { // the queue in `heap` that `page` should be in
+ mi_assert_internal(heap!=NULL);
+ const size_t bin = mi_page_bin(page);
+ mi_page_queue_t* pq = &heap->pages[bin];
+ mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
+ (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) ||
+ (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
+ return pq;
+}
+
+static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) { // same, but for the page's own heap
+ mi_heap_t* heap = mi_page_heap(page);
+ mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+ mi_assert_expensive(mi_page_queue_contains(pq, page));
+ return pq;
+}
+
+// The current small page array is for efficiency and for each
+// small size (up to 256) it points directly to the page for that
+// size without having to compute the bin. This means when the
+// current free page queue is updated for a small bin, we need to update a
+// range of entries in `_mi_page_small_free`.
+static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
+ mi_assert_internal(mi_heap_contains_queue(heap,pq));
+ size_t size = pq->block_size;
+ if (size > MI_SMALL_SIZE_MAX) return; // only small sizes have direct entries
+
+ mi_page_t* page = pq->first;
+ if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty; // empty queue: point at the sentinel empty page
+
+ // find index in the right direct page array
+ size_t start;
+ size_t idx = _mi_wsize_from_size(size);
+ mi_page_t** pages_free = heap->pages_free_direct;
+
+ if (pages_free[idx] == page) return; // already set
+
+ // find start slot
+ if (idx<=1) {
+ start = 0;
+ }
+ else {
+ // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
+ size_t bin = mi_bin(size);
+ const mi_page_queue_t* prev = pq - 1;
+ while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
+ prev--;
+ }
+ start = 1 + _mi_wsize_from_size(prev->block_size);
+ if (start > idx) start = idx;
+ }
+
+ // set size range to the right page
+ mi_assert(start <= idx);
+ for (size_t sz = start; sz <= idx; sz++) {
+ pages_free[sz] = page;
+ }
+}
+
+/*
+static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
+ return (queue->first == NULL);
+}
+*/
+
+static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { // unlink `page` from `queue` and clear its full flag
+ mi_assert_internal(page != NULL);
+ mi_assert_expensive(mi_page_queue_contains(queue, page));
+ mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+ (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
+ (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+ mi_heap_t* heap = mi_page_heap(page);
+
+ if (page->prev != NULL) page->prev->next = page->next;
+ if (page->next != NULL) page->next->prev = page->prev;
+ if (page == queue->last) queue->last = page->prev;
+ if (page == queue->first) {
+ queue->first = page->next;
+ // update first
+ mi_assert_internal(mi_heap_contains_queue(heap, queue));
+ mi_heap_queue_first_update(heap,queue); // keep the small-size direct table in sync
+ }
+ heap->page_count--;
+ page->next = NULL;
+ page->prev = NULL;
+ // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
+ mi_page_set_in_full(page,false);
+}
+
+
+static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { // push `page` at the front of `queue`
+ mi_assert_internal(mi_page_heap(page) == heap);
+ mi_assert_internal(!mi_page_queue_contains(queue, page));
+ #if MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ #endif
+ mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+ (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
+ (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+
+ mi_page_set_in_full(page, mi_page_queue_is_full(queue)); // full flag follows the target queue
+ // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
+ page->next = queue->first;
+ page->prev = NULL;
+ if (queue->first != NULL) {
+ mi_assert_internal(queue->first->prev == NULL);
+ queue->first->prev = page;
+ queue->first = page;
+ }
+ else {
+ queue->first = queue->last = page;
+ }
+
+ // update direct
+ mi_heap_queue_first_update(heap, queue);
+ heap->page_count++;
+}
+
+static void mi_page_queue_move_to_front(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { // re-push `page` so it becomes `queue->first`
+ mi_assert_internal(mi_page_heap(page) == heap);
+ mi_assert_internal(mi_page_queue_contains(queue, page));
+ if (queue->first == page) return; // already at the front
+ mi_page_queue_remove(queue, page);
+ mi_page_queue_push(heap, queue, page);
+ mi_assert_internal(queue->first == page);
+}
+
+static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) { // move `page` from `from` into `to` (at the end, or at 2nd place)
+ mi_assert_internal(page != NULL);
+ mi_assert_expensive(mi_page_queue_contains(from, page));
+ mi_assert_expensive(!mi_page_queue_contains(to, page));
+ const size_t bsize = mi_page_block_size(page);
+ MI_UNUSED(bsize);
+ mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
+ (bsize == to->block_size && mi_page_queue_is_full(from)) ||
+ (bsize == from->block_size && mi_page_queue_is_full(to)) ||
+ (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) ||
+ (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));
+
+ mi_heap_t* heap = mi_page_heap(page);
+
+ // delete from `from`
+ if (page->prev != NULL) page->prev->next = page->next;
+ if (page->next != NULL) page->next->prev = page->prev;
+ if (page == from->last) from->last = page->prev;
+ if (page == from->first) {
+ from->first = page->next;
+ // update first
+ mi_assert_internal(mi_heap_contains_queue(heap, from));
+ mi_heap_queue_first_update(heap, from);
+ }
+
+ // insert into `to`
+ if (enqueue_at_end) {
+ // enqueue at the end
+ page->prev = to->last;
+ page->next = NULL;
+ if (to->last != NULL) {
+ mi_assert_internal(heap == mi_page_heap(to->last));
+ to->last->next = page;
+ to->last = page;
+ }
+ else {
+ to->first = page;
+ to->last = page;
+ mi_heap_queue_first_update(heap, to);
+ }
+ }
+ else {
+ if (to->first != NULL) {
+ // enqueue at 2nd place
+ mi_assert_internal(heap == mi_page_heap(to->first));
+ mi_page_t* next = to->first->next;
+ page->prev = to->first;
+ page->next = next;
+ to->first->next = page;
+ if (next != NULL) {
+ next->prev = page;
+ }
+ else {
+ to->last = page;
+ }
+ }
+ else {
+ // enqueue at the head (singleton list)
+ page->prev = NULL;
+ page->next = NULL;
+ to->first = page;
+ to->last = page;
+ mi_heap_queue_first_update(heap, to);
+ }
+ }
+
+ mi_page_set_in_full(page, mi_page_queue_is_full(to)); // full flag follows the destination queue
+}
+
+static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+ mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end */, page);
+}
+
+static void mi_page_queue_enqueue_from_full(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+ // note: we could insert at the front to increase reuse, but it slows down certain benchmarks (like `alloc-test`)
+ mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end of the `to` queue? */, page);
+}
+
+// Only called from `mi_heap_absorb`.
+size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { // splice all pages of `append` onto `pq`; returns the page count
+ mi_assert_internal(mi_heap_contains_queue(heap,pq));
+ mi_assert_internal(pq->block_size == append->block_size);
+
+ if (append->first==NULL) return 0; // nothing to append
+
+ // set append pages to new heap and count
+ size_t count = 0;
+ for (mi_page_t* page = append->first; page != NULL; page = page->next) {
+ // inline `mi_page_set_heap` to avoid wrong assertion during absorption;
+ // in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive.
+ mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
+ // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
+ // side effect that it spins until any DELAYED_FREEING is finished. This ensures
+ // that after appending only the new heap will be used for delayed free operations.
+ _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
+ count++;
+ }
+
+ if (pq->last==NULL) {
+ // take over afresh
+ mi_assert_internal(pq->first==NULL);
+ pq->first = append->first;
+ pq->last = append->last;
+ mi_heap_queue_first_update(heap, pq);
+ }
+ else {
+ // append to end
+ mi_assert_internal(pq->last!=NULL);
+ mi_assert_internal(append->first!=NULL);
+ pq->last->next = append->first;
+ append->first->prev = pq->last;
+ pq->last = append->last;
+ }
+ return count;
+}
diff --git a/compat/mimalloc/page.c b/compat/mimalloc/page.c
new file mode 100644
index 00000000000000..aeea9eeaa85e0c
--- /dev/null
+++ b/compat/mimalloc/page.c
@@ -0,0 +1,1042 @@
+/*----------------------------------------------------------------------------
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* -----------------------------------------------------------
+ The core of the allocator. Every segment contains
+ pages of a certain block size. The main function
+ exported is `mi_malloc_generic`.
+----------------------------------------------------------- */
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+
+/* -----------------------------------------------------------
+ Definition of page queues for each block size
+----------------------------------------------------------- */
+
+#define MI_IN_PAGE_C
+#include "page-queue.c"
+#undef MI_IN_PAGE_C
+
+
+/* -----------------------------------------------------------
+ Page helpers
+----------------------------------------------------------- */
+
+// Index a block in a page
+static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) {
+ MI_UNUSED(page);
+ mi_assert_internal(page != NULL);
+ mi_assert_internal(i <= page->reserved);
+ return (mi_block_t*)((uint8_t*)page_start + (i * block_size)); // block `i` lives at a fixed offset from the page start
+}
+
+static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
+static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld);
+
+#if (MI_DEBUG>=3)
+static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) { // debug-only: length of a block free list
+ size_t count = 0;
+ while (head != NULL) {
+ mi_assert_internal(page == _mi_ptr_page(head));
+ count++;
+ head = mi_block_next(page, head);
+ }
+ return count;
+}
+
+/*
+// Start of the page available memory
+static inline uint8_t* mi_page_area(const mi_page_t* page) {
+ return _mi_page_start(_mi_page_segment(page), page, NULL);
+}
+*/
+
+static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) { // debug-only: every block of the list lies inside the page area
+ size_t psize;
+ uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
+ mi_block_t* start = (mi_block_t*)page_area;
+ mi_block_t* end = (mi_block_t*)(page_area + psize);
+ while(p != NULL) {
+ if (p < start || p >= end) return false;
+ p = mi_block_next(page, p);
+ }
+#if MI_DEBUG>3 // generally too expensive to check this
+ if (page->free_is_zero) {
+ const size_t ubsize = mi_page_usable_block_size(page);
+ for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
+ mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
+ }
+ }
+#endif
+ return true;
+}
+
+static bool mi_page_is_valid_init(mi_page_t* page) { // debug-only: invariants that hold right after page init
+ mi_assert_internal(mi_page_block_size(page) > 0);
+ mi_assert_internal(page->used <= page->capacity);
+ mi_assert_internal(page->capacity <= page->reserved);
+
+ uint8_t* start = mi_page_start(page);
+ mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL));
+ mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE));
+ //mi_assert_internal(start + page->capacity*page->block_size == page->top);
+
+ mi_assert_internal(mi_page_list_is_valid(page,page->free));
+ mi_assert_internal(mi_page_list_is_valid(page,page->local_free));
+
+ #if MI_DEBUG>3 // generally too expensive to check this
+ if (page->free_is_zero) {
+ const size_t ubsize = mi_page_usable_block_size(page);
+ for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
+ mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
+ }
+ }
+ #endif
+
+ #if !MI_TRACK_ENABLED && !MI_TSAN
+ mi_block_t* tfree = mi_page_thread_free(page);
+ mi_assert_internal(mi_page_list_is_valid(page, tfree));
+ //size_t tfree_count = mi_page_list_count(page, tfree);
+ //mi_assert_internal(tfree_count <= page->thread_freed + 1);
+ #endif
+
+ size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
+ mi_assert_internal(page->used + free_count == page->capacity); // used + free blocks account for the full capacity
+
+ return true;
+}
+
+extern mi_decl_hidden bool _mi_process_is_initialized; // has mi_process_init been called?
+
+bool _mi_page_is_valid(mi_page_t* page) { // debug-only: full validity check, including queue membership
+ mi_assert_internal(mi_page_is_valid_init(page));
+ #if MI_SECURE
+ mi_assert_internal(page->keys[0] != 0);
+ #endif
+ if (mi_page_heap(page)!=NULL) {
+ mi_segment_t* segment = _mi_page_segment(page);
+
+ mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
+ #if MI_HUGE_PAGE_ABANDON
+ if (segment->kind != MI_SEGMENT_HUGE)
+ #endif
+ {
+ mi_page_queue_t* pq = mi_page_queue_of(page);
+ mi_assert_internal(mi_page_queue_contains(pq, page));
+ mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
+ mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
+ }
+ }
+ return true;
+}
+#endif
+
+void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { // blocking variant: retry until the delayed-free flag is set
+ while (!_mi_page_try_use_delayed_free(page, delay, override_never)) {
+ mi_atomic_yield();
+ }
+}
+
+bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { // returns false if it gave up while another thread was in DELAYED_FREEING
+ mi_thread_free_t tfreex;
+ mi_delayed_t old_delay;
+ mi_thread_free_t tfree;
+ size_t yield_count = 0;
+ do {
+ tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
+ tfreex = mi_tf_set_delayed(tfree, delay);
+ old_delay = mi_tf_delayed(tfree);
+ if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
+ if (yield_count >= 4) return false; // give up after 4 tries
+ yield_count++;
+ mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
+ // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
+ }
+ else if (delay == old_delay) {
+ break; // avoid atomic operation if already equal
+ }
+ else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) {
+ break; // leave never-delayed flag set
+ }
+ } while ((old_delay == MI_DELAYED_FREEING) ||
+ !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+
+ return true; // success
+}
+
+/* -----------------------------------------------------------
+ Page collect the `local_free` and `thread_free` lists
+----------------------------------------------------------- */
+
+// Collect the local `thread_free` list using an atomic exchange.
+// Note: The exchange must be done atomically as this is used right after
+// moving to the full list in `mi_page_collect_ex` and we need to
+// ensure that there was no race where the page became unfull just before the move.
+static void _mi_page_thread_free_collect(mi_page_t* page) // move the cross-thread free list onto the local free list
+{
+ mi_block_t* head;
+ mi_thread_free_t tfreex;
+ mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
+ do {
+ head = mi_tf_block(tfree);
+ tfreex = mi_tf_set_block(tfree,NULL);
+ } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex)); // atomically detach the whole list
+
+ // return if the list is empty
+ if (head == NULL) return;
+
+ // find the tail -- also to get a proper count (without data races)
+ size_t max_count = page->capacity; // cannot collect more than capacity
+ size_t count = 1;
+ mi_block_t* tail = head;
+ mi_block_t* next;
+ while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
+ count++;
+ tail = next;
+ }
+ // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
+ if (count > max_count) {
+ _mi_error_message(EFAULT, "corrupted thread-free list\n");
+ return; // the thread-free items cannot be freed
+ }
+
+ // and append the current local free list
+ mi_block_set_next(page,tail, page->local_free);
+ page->local_free = head;
+
+ // update counts now
+ page->used -= (uint16_t)count;
+}
+
+void _mi_page_free_collect(mi_page_t* page, bool force) { // collect thread-free and local-free lists into `page->free`
+ mi_assert_internal(page!=NULL);
+
+ // collect the thread free list
+ if (force || mi_page_thread_free(page) != NULL) { // quick test to avoid an atomic operation
+ _mi_page_thread_free_collect(page);
+ }
+
+ // and the local free list
+ if (page->local_free != NULL) {
+ if mi_likely(page->free == NULL) {
+ // usual case
+ page->free = page->local_free;
+ page->local_free = NULL;
+ page->free_is_zero = false;
+ }
+ else if (force) {
+ // append -- only on shutdown (force) as this is a linear operation
+ mi_block_t* tail = page->local_free;
+ mi_block_t* next;
+ while ((next = mi_block_next(page, tail)) != NULL) {
+ tail = next;
+ }
+ mi_block_set_next(page, tail, page->free);
+ page->free = page->local_free;
+ page->local_free = NULL;
+ page->free_is_zero = false;
+ }
+ }
+
+ mi_assert_internal(!force || page->local_free == NULL);
+}
+
+
+
+/* -----------------------------------------------------------
+ Page fresh and retire
+----------------------------------------------------------- */
+
+// called from segments when reclaiming abandoned pages
+void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
+ mi_assert_expensive(mi_page_is_valid_init(page));
+
+ mi_assert_internal(mi_page_heap(page) == heap);
+ mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
+ #if MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ #endif
+
+ // TODO: push on full queue immediately if it is full?
+ mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
+ mi_page_queue_push(heap, pq, page); // insert the reclaimed page into this heap's queue
+ mi_assert_expensive(_mi_page_is_valid(page));
+}
+
+// allocate a fresh page from a segment
+static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
+ #if !MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(pq != NULL);
+ mi_assert_internal(mi_heap_contains_queue(heap, pq));
+ mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
+ #endif
+ mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments);
+ if (page == NULL) {
+ // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
+ return NULL;
+ }
+ #if MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+ #endif
+ mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
+ // a fresh page was found, initialize it
+ const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+ mi_assert_internal(full_block_size >= block_size);
+ mi_page_init(heap, page, full_block_size, heap->tld);
+ mi_heap_stat_increase(heap, pages, 1);
+ mi_heap_stat_increase(heap, page_bins[_mi_page_stats_bin(page)], 1);
+ if (pq != NULL) { mi_page_queue_push(heap, pq, page); } // make it available in the matching queue
+ mi_assert_expensive(_mi_page_is_valid(page));
+ return page;
+}
+
+// Get a fresh page to use
+static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
+ mi_assert_internal(mi_heap_contains_queue(heap, pq));
+ mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0); // no special alignment needed
+ if (page==NULL) return NULL;
+ mi_assert_internal(pq->block_size==mi_page_block_size(page));
+ mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));
+ return page;
+}
+
+/* -----------------------------------------------------------
+ Do any delayed frees
+ (put there by other threads if they deallocated in a full page)
+----------------------------------------------------------- */
+void _mi_heap_delayed_free_all(mi_heap_t* heap) { // process the delayed-free list until it is fully drained
+ while (!_mi_heap_delayed_free_partial(heap)) {
+ mi_atomic_yield();
+ }
+}
+
+// returns true if all delayed frees were processed
+bool _mi_heap_delayed_free_partial(mi_heap_t* heap) {
+ // take over the list (note: no atomic exchange since it is often NULL)
+ mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
+ while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ };
+ bool all_freed = true;
+
+ // and free them all
+ while(block != NULL) {
+ mi_block_t* next = mi_block_nextx(heap,block, heap->keys);
+ // use internal free instead of regular one to keep stats etc correct
+ if (!_mi_free_delayed_block(block)) {
+ // we might already start delayed freeing while another thread has not yet
+ // reset the delayed_freeing flag; in that case delay it further by reinserting the current block
+ // into the delayed free list
+ all_freed = false;
+ mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
+ do {
+ mi_block_set_nextx(heap, block, dfree, heap->keys);
+ } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
+ }
+ block = next;
+ }
+ return all_freed;
+}
+
+/* -----------------------------------------------------------
+ Unfull, abandon, free and retire
+----------------------------------------------------------- */
+
+// Move a page from the full list back to a regular list
+void _mi_page_unfull(mi_page_t* page) {
+ mi_assert_internal(page != NULL);
+ mi_assert_expensive(_mi_page_is_valid(page));
+ mi_assert_internal(mi_page_is_in_full(page));
+ if (!mi_page_is_in_full(page)) return;
+
+ mi_heap_t* heap = mi_page_heap(page);
+ mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL];
+ mi_page_set_in_full(page, false); // to get the right queue
+ mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+ mi_page_set_in_full(page, true); // restore; the enqueue below sets it from the target queue
+ mi_page_queue_enqueue_from_full(pq, pqfull, page);
+}
+
+static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { // move a page with no immediate free blocks to the full queue
+ mi_assert_internal(pq == mi_page_queue_of(page));
+ mi_assert_internal(!mi_page_immediate_available(page));
+ mi_assert_internal(!mi_page_is_in_full(page));
+
+ if (mi_page_is_in_full(page)) return;
+ mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
+ _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
+}
+
+
+// Abandon a page with used blocks at the end of a thread.
+// Note: only call if it is ensured that no references exist from
+// the `page->heap->thread_delayed_free` into this page.
+// Currently only called through `mi_heap_collect_ex` which ensures this.
+void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
+ mi_assert_internal(page != NULL);
+ mi_assert_expensive(_mi_page_is_valid(page));
+ mi_assert_internal(pq == mi_page_queue_of(page));
+ mi_assert_internal(mi_page_heap(page) != NULL);
+
+ mi_heap_t* pheap = mi_page_heap(page);
+
+ // remove from our page list
+ mi_segments_tld_t* segments_tld = &pheap->tld->segments;
+ mi_page_queue_remove(pq, page);
+
+ // page is no longer associated with our heap
+ mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
+ mi_page_set_heap(page, NULL);
+
+#if (MI_DEBUG>1) && !MI_TRACK_ENABLED
+ // check there are no references left..
+ for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
+ mi_assert_internal(_mi_ptr_page(block) != page);
+ }
+#endif
+
+ // and abandon it
+ mi_assert_internal(mi_page_heap(page) == NULL);
+ _mi_segment_page_abandon(page,segments_tld);
+}
+
+// force abandon a page
+void _mi_page_force_abandon(mi_page_t* page) {
+ mi_heap_t* heap = mi_page_heap(page);
+ // mark page as not using delayed free
+ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+
+ // ensure this page is no longer in the heap delayed free list
+ _mi_heap_delayed_free_all(heap);
+ // We can still access the page meta-info even if it is freed as we ensure
+ // in `mi_segment_force_abandon` that the segment is not freed (yet)
+ if (page->capacity == 0) return; // it may have been freed now
+
+ // and now unlink it from the page queue and abandon (or free)
+ mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+ if (mi_page_all_free(page)) {
+ _mi_page_free(page, pq, false); // fully free: release it instead of abandoning
+ }
+ else {
+ _mi_page_abandon(page, pq);
+ }
+}
+
+
+// Free a page with no more free blocks
+void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
+ mi_assert_internal(page != NULL);
+ mi_assert_expensive(_mi_page_is_valid(page));
+ mi_assert_internal(pq == mi_page_queue_of(page));
+ mi_assert_internal(mi_page_all_free(page));
+ mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING);
+
+ // no more aligned blocks in here
+ mi_page_set_has_aligned(page, false);
+
+ // remove from the page list
+ // (no need to do _mi_heap_delayed_free first as all blocks are already free)
+ mi_heap_t* heap = mi_page_heap(page);
+ mi_segments_tld_t* segments_tld = &heap->tld->segments;
+ mi_page_queue_remove(pq, page);
+
+ // and free it
+ mi_page_set_heap(page,NULL); // detach from the heap before returning it to the segment
+ _mi_segment_page_free(page, force, segments_tld);
+}
+
+#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE
+#define MI_RETIRE_CYCLES (16)
+
+// Retire a page with no more used blocks
+// Important to not retire too quickly though as new
+// allocations might be coming.
+// Note: called from `mi_free` and benchmarks often
+// trigger this due to freeing everything and then
+// allocating again so careful when changing this.
+void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
+ mi_assert_internal(page != NULL);
+ mi_assert_expensive(_mi_page_is_valid(page));
+ mi_assert_internal(mi_page_all_free(page));
+
+ mi_page_set_has_aligned(page, false);
+
+ // don't retire too often..
+ // (or we end up retiring and re-allocating most of the time)
+ // NOTE: refine this more: we should not retire if this
+ // is the only page left with free blocks. It is not clear
+ // how to check this efficiently though...
+ // for now, we don't retire if it is the only page left of this size class.
+ mi_page_queue_t* pq = mi_page_queue_of(page);
+ #if MI_RETIRE_CYCLES > 0
+ const size_t bsize = mi_page_block_size(page);
+ if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
+ if (pq->last==page && pq->first==page) { // the only page in the queue?
+ mi_stat_counter_increase(_mi_stats_main.pages_retire,1);
+ page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+ mi_heap_t* heap = mi_page_heap(page);
+ mi_assert_internal(pq >= heap->pages);
+ const size_t index = pq - heap->pages;
+ mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE);
+ if (index < heap->page_retired_min) heap->page_retired_min = index; // remember bin range for _mi_heap_collect_retired
+ if (index > heap->page_retired_max) heap->page_retired_max = index;
+ mi_assert_internal(mi_page_all_free(page));
+ return; // don't free after all
+ }
+ }
+ #endif
+ _mi_page_free(page, pq, false);
+}
+
+// free retired pages: we don't need to look at the entire queues
+// since we only retire pages that are at the head position in a queue.
+// Decrements each retired head page's expiration counter and frees it when
+// it reaches zero (or immediately when `force`); recomputes the
+// [page_retired_min, page_retired_max] bin range for the next scan.
+void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
+  size_t min = MI_BIN_FULL;  // recomputed range of bins still holding retired pages
+  size_t max = 0;
+  for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) {
+    mi_page_queue_t* pq = &heap->pages[bin];
+    mi_page_t* page = pq->first;
+    if (page != NULL && page->retire_expire != 0) {
+      if (mi_page_all_free(page)) {
+        page->retire_expire--;  // one collection cycle has passed
+        if (force || page->retire_expire == 0) {
+          _mi_page_free(pq->first, pq, force);
+        }
+        else {
+          // keep retired, update min/max
+          if (bin < min) min = bin;
+          if (bin > max) max = bin;
+        }
+      }
+      else {
+        // the page is in use again; cancel its retirement
+        page->retire_expire = 0;
+      }
+    }
+  }
+  heap->page_retired_min = min;
+  heap->page_retired_max = max;
+}
+
+
+/* -----------------------------------------------------------
+ Initialize the initial free list in a page.
+ In secure mode we initialize a randomized list by
+ alternating between slices.
+----------------------------------------------------------- */
+
+#define MI_MAX_SLICE_SHIFT (6) // at most 64 slices
+#define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT)
+#define MI_MIN_SLICES (2)
+
+// Extend the free list of `page` with `extend` fresh blocks in randomized
+// order (secure mode): the new area is split into up to MI_MAX_SLICES
+// slices and the list threads randomly between them so consecutive
+// allocations are not at predictable addresses.
+static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
+  MI_UNUSED(stats);
+  #if (MI_SECURE<=2)
+  mi_assert_internal(page->free == NULL);
+  mi_assert_internal(page->local_free == NULL);
+  #endif
+  mi_assert_internal(page->capacity + extend <= page->reserved);
+  mi_assert_internal(bsize == mi_page_block_size(page));
+  void* const page_area = mi_page_start(page);
+
+  // initialize a randomized free list
+  // set up `slice_count` slices to alternate between
+  size_t shift = MI_MAX_SLICE_SHIFT;
+  while ((extend >> shift) == 0) {  // shrink the slice count so each slice gets at least one block
+    shift--;
+  }
+  const size_t slice_count = (size_t)1U << shift;
+  const size_t slice_extend = extend / slice_count;
+  mi_assert_internal(slice_extend >= 1);
+  mi_block_t* blocks[MI_MAX_SLICES]; // current start of the slice
+  size_t counts[MI_MAX_SLICES]; // available objects in the slice
+  for (size_t i = 0; i < slice_count; i++) {
+    blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend);
+    counts[i] = slice_extend;
+  }
+  counts[slice_count-1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?)
+
+  // and initialize the free list by randomly threading through them
+  // set up first element
+  const uintptr_t r = _mi_heap_random_next(heap);
+  size_t current = r % slice_count;
+  counts[current]--;
+  mi_block_t* const free_start = blocks[current];
+  // and iterate through the rest; use `random_shuffle` for performance
+  uintptr_t rnd = _mi_random_shuffle(r|1); // ensure not 0
+  for (size_t i = 1; i < extend; i++) {
+    // call random_shuffle only every INTPTR_SIZE rounds
+    const size_t round = i%MI_INTPTR_SIZE;
+    if (round == 0) rnd = _mi_random_shuffle(rnd);
+    // select a random next slice index (8 random bits per round)
+    size_t next = ((rnd >> 8*round) & (slice_count-1));
+    while (counts[next]==0) { // ensure it still has space
+      next++;
+      if (next==slice_count) next = 0;
+    }
+    // and link the current block to it
+    counts[next]--;
+    mi_block_t* const block = blocks[current];
+    blocks[current] = (mi_block_t*)((uint8_t*)block + bsize); // bump to the following block
+    mi_block_set_next(page, block, blocks[next]); // and set next; note: we may have `current == next`
+    current = next;
+  }
+  // prepend to the free list (usually NULL)
+  mi_block_set_next(page, blocks[current], page->free); // end of the list
+  page->free = free_start;
+}
+
+// Append `extend` fresh blocks to `page->free` as one sequential run,
+// linking each block to the one physically following it.
+static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats)
+{
+  MI_UNUSED(stats);
+  #if (MI_SECURE <= 2)
+  mi_assert_internal(page->free == NULL);
+  mi_assert_internal(page->local_free == NULL);
+  #endif
+  mi_assert_internal(page->capacity + extend <= page->reserved);
+  mi_assert_internal(bsize == mi_page_block_size(page));
+  void* const area = mi_page_start(page);
+
+  // first and last block of the newly initialized run
+  mi_block_t* const head = mi_page_block_at(page, area, bsize, page->capacity);
+  mi_block_t* const tail = mi_page_block_at(page, area, bsize, page->capacity + extend - 1);
+
+  // thread a sequential free list through the run
+  for (mi_block_t* cur = head; cur <= tail; ) {
+    mi_block_t* const succ = (mi_block_t*)((uint8_t*)cur + bsize);
+    mi_block_set_next(page, cur, succ);
+    cur = succ;
+  }
+  // terminate the run by pointing at the old free list (usually `NULL`)
+  mi_block_set_next(page, tail, page->free);
+  page->free = head;
+}
+
+/* -----------------------------------------------------------
+ Page initialize and extend the capacity
+----------------------------------------------------------- */
+
+#define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well.
+#if (MI_SECURE>0)
+#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many
+#else
+#define MI_MIN_EXTEND (4)
+#endif
+
+// Extend the capacity (up to reserved) by initializing a free list
+// We do at most `MI_MAX_EXTEND` to avoid touching too much memory
+// Note: we also experimented with "bump" allocation on the first
+// allocations but this did not speed up any benchmark (due to an
+// extra test in malloc? or cache effects?)
+// Always returns `true` on the code paths visible here.
+static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
+  mi_assert_expensive(mi_page_is_valid_init(page));
+  #if (MI_SECURE<=2)
+  mi_assert(page->free == NULL);
+  mi_assert(page->local_free == NULL);
+  if (page->free != NULL) return true;
+  #endif
+  if (page->capacity >= page->reserved) return true;  // already fully extended
+
+  mi_stat_counter_increase(tld->stats.pages_extended, 1);
+
+  // calculate the extend count
+  const size_t bsize = mi_page_block_size(page);
+  size_t extend = page->reserved - page->capacity;
+  mi_assert_internal(extend > 0);
+
+  // extend at most ~MI_MAX_EXTEND_SIZE bytes worth of blocks at a time,
+  // but always at least MI_MIN_EXTEND blocks
+  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize);
+  if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
+  mi_assert_internal(max_extend > 0);
+
+  if (extend > max_extend) {
+    // ensure we don't touch memory beyond the page to reduce page commit.
+    // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
+    extend = max_extend;
+  }
+
+  mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
+  mi_assert_internal(extend < (1UL<<16));
+
+  // and append the extend the free list
+  if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
+    mi_page_free_list_extend(page, bsize, extend, &tld->stats );
+  }
+  else {
+    // secure mode: randomized free-list order
+    mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats);
+  }
+  // enable the new free list
+  page->capacity += (uint16_t)extend;
+  mi_stat_increase(tld->stats.page_committed, extend * bsize);
+  mi_assert_expensive(mi_page_is_valid_init(page));
+  return true;
+}
+
+// Initialize a fresh page: attach it to `heap`, set its block size and
+// reserved block count from the segment-provided page area, initialize the
+// free-list encoding keys (if enabled), and build an initial free list.
+static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) {
+  mi_assert(page != NULL);
+  mi_segment_t* segment = _mi_page_segment(page);
+  mi_assert(segment != NULL);
+  mi_assert_internal(block_size > 0);
+  // set fields
+  mi_page_set_heap(page, heap);
+  page->block_size = block_size;
+  size_t page_size;
+  page->page_start = _mi_segment_page_start(segment, page, &page_size);
+  mi_track_mem_noaccess(page->page_start,page_size);
+  mi_assert_internal(mi_page_block_size(page) <= page_size);
+  mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
+  mi_assert_internal(page_size / block_size < (1L<<16));
+  page->reserved = (uint16_t)(page_size / block_size);
+  mi_assert_internal(page->reserved > 0);
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
+  // random keys used to encode the free list pointers
+  page->keys[0] = _mi_heap_random_next(heap);
+  page->keys[1] = _mi_heap_random_next(heap);
+  #endif
+  // fresh zero-initialized memory means the free blocks are zero too
+  page->free_is_zero = page->is_zero_init;
+  #if MI_DEBUG>2
+  if (page->is_zero_init) {
+    mi_track_mem_defined(page->page_start, page_size);
+    mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size));
+  }
+  #endif
+  mi_assert_internal(page->is_committed);
+  // cache the shift for power-of-two block sizes (fast block index math)
+  if (block_size > 0 && _mi_is_power_of_two(block_size)) {
+    page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
+  }
+  else {
+    page->block_size_shift = 0;
+  }
+
+  mi_assert_internal(page->capacity == 0);
+  mi_assert_internal(page->free == NULL);
+  mi_assert_internal(page->used == 0);
+  mi_assert_internal(page->xthread_free == 0);
+  mi_assert_internal(page->next == NULL);
+  mi_assert_internal(page->prev == NULL);
+  mi_assert_internal(page->retire_expire == 0);
+  mi_assert_internal(!mi_page_has_aligned(page));
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
+  mi_assert_internal(page->keys[0] != 0);
+  mi_assert_internal(page->keys[1] != 0);
+  #endif
+  mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift)));
+  mi_assert_expensive(mi_page_is_valid_init(page));
+
+  // initialize an initial free list
+  if (mi_page_extend_free(heap,page,tld)) {
+    mi_assert(mi_page_immediate_available(page));
+  }
+  return;
+}
+
+
+/* -----------------------------------------------------------
+ Find pages with free blocks
+-------------------------------------------------------------*/
+
+// search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
+#define MI_MAX_CANDIDATE_SEARCH (4)
+
+// is the page not yet used up to its reserved space?
+static bool mi_page_is_expandable(const mi_page_t* page) {
+  mi_assert_internal(page != NULL);
+  mi_assert_internal(page->capacity <= page->reserved);
+  const bool has_room = (page->capacity < page->reserved);
+  return has_room;
+}
+
+
+// Find a page with free blocks of `page->block_size`.
+// Walks `pq` in "next fit" order, collecting freed blocks and moving full
+// pages to the full queue; with candidate search enabled it examines up to
+// MI_MAX_CANDIDATE_SEARCH pages and prefers fuller, non-expandable pages.
+// If no page qualifies it tries a fresh page, and when `first_try` retries
+// the whole search once (a reclaimed abandoned page may have appeared).
+// Returns a page with immediately available blocks, or NULL on out-of-memory.
+static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
+{
+  // search through the pages in "next fit" order
+  #if MI_STAT
+  size_t count = 0;
+  #endif
+  size_t candidate_count = 0; // we reset this on the first candidate to limit the search
+  mi_page_t* page_candidate = NULL; // a page with free space
+  mi_page_t* page = pq->first;
+
+  while (page != NULL)
+  {
+    mi_page_t* next = page->next; // remember next (this page may move queues)
+    #if MI_STAT
+    count++;
+    #endif
+    candidate_count++;
+
+    // collect freed blocks by us and other threads
+    _mi_page_free_collect(page, false);
+
+    #if MI_MAX_CANDIDATE_SEARCH > 1
+    // search up to N pages for a best candidate
+
+    // is the local free list non-empty?
+    const bool immediate_available = mi_page_immediate_available(page);
+
+    // if the page is completely full, move it to the `mi_pages_full`
+    // queue so we don't visit long-lived pages too often.
+    if (!immediate_available && !mi_page_is_expandable(page)) {
+      mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
+      mi_page_to_full(page, pq);
+    }
+    else {
+      // the page has free space, make it a candidate
+      // we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages)
+      if (page_candidate == NULL) {
+        page_candidate = page;
+        candidate_count = 0; // restart the search budget from the first candidate
+      }
+      // prefer to reuse fuller pages (in the hope the less used page gets freed)
+      else if (page->used >= page_candidate->used && !mi_page_is_mostly_used(page) && !mi_page_is_expandable(page)) {
+        page_candidate = page;
+      }
+      // if we find a non-expandable candidate, or searched for N pages, return with the best candidate
+      if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) {
+        mi_assert_internal(page_candidate!=NULL);
+        break;
+      }
+    }
+    #else
+    // first-fit algorithm
+    // If the page contains free blocks, we are done
+    if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
+      break; // pick this one
+    }
+
+    // If the page is completely full, move it to the `mi_pages_full`
+    // queue so we don't visit long-lived pages too often.
+    mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
+    mi_page_to_full(page, pq);
+    #endif
+
+    page = next;
+  } // for each page
+
+  mi_heap_stat_counter_increase(heap, page_searches, count);
+  mi_heap_stat_counter_increase(heap, page_searches_count, 1);
+
+  // set the page to the best candidate
+  if (page_candidate != NULL) {
+    page = page_candidate;
+  }
+  if (page != NULL) {
+    if (!mi_page_immediate_available(page)) {
+      mi_assert_internal(mi_page_is_expandable(page));
+      if (!mi_page_extend_free(heap, page, heap->tld)) {
+        page = NULL; // failed to extend
+      }
+    }
+    mi_assert_internal(page == NULL || mi_page_immediate_available(page));
+  }
+
+  if (page == NULL) {
+    _mi_heap_collect_retired(heap, false); // perhaps make a page available?
+    page = mi_page_fresh(heap, pq);
+    if (page == NULL && first_try) {
+      // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
+      page = mi_page_queue_find_free_ex(heap, pq, false);
+    }
+  }
+  else {
+    // move the page to the front of the queue
+    mi_page_queue_move_to_front(heap, pq, page);
+    page->retire_expire = 0; // the page is in use again; cancel any pending retirement
+    // _mi_heap_collect_retired(heap, false); // update retire counts; note: increases rss on MemoryLoad bench so don't do this
+  }
+  mi_assert_internal(page == NULL || mi_page_immediate_available(page));
+
+
+  return page;
+}
+
+
+
+// Find a page with free blocks of `size`.
+// Fast path: check the queue's head page first; falls back to the full
+// next-fit/candidate search in `mi_page_queue_find_free_ex` otherwise.
+static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
+  mi_page_queue_t* pq = mi_page_queue(heap, size);
+
+  // check the first page: we even do this with candidate search or otherwise we re-search every time
+  mi_page_t* page = pq->first;
+  if (page != NULL) {
+    #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
+    if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
+      mi_page_extend_free(heap, page, heap->tld);
+      mi_assert_internal(mi_page_immediate_available(page));
+    }
+    else
+    #endif
+    {
+      _mi_page_free_collect(page,false);
+    }
+
+    if (mi_page_immediate_available(page)) {
+      page->retire_expire = 0; // page is in use again; cancel any pending retirement
+      return page; // fast path
+    }
+  }
+
+  return mi_page_queue_find_free_ex(heap, pq, true);
+}
+
+
+/* -----------------------------------------------------------
+ Users can register a deferred free function called
+ when the `free` list is empty. Since the `local_free`
+ is separate this is deterministically called after
+ a certain number of allocations.
+----------------------------------------------------------- */
+
+// The user-registered deferred-free callback (or NULL) and its argument.
+// Written by `mi_register_deferred_free`, read on the allocation slow path
+// in `_mi_deferred_free`; the argument is accessed with atomic operations.
+static mi_deferred_free_fun* volatile deferred_free = NULL;
+static _Atomic(void*) deferred_arg; // = NULL
+
+// Invoke the user-registered deferred-free callback (if any).
+// `heartbeat` provides a monotonically increasing tick for the callback;
+// the `recurse` flag guards against re-entrancy in case the callback
+// itself allocates or frees.
+void _mi_deferred_free(mi_heap_t* heap, bool force) {
+  heap->tld->heartbeat++;
+  if (deferred_free != NULL && !heap->tld->recurse) {
+    heap->tld->recurse = true;
+    deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg));
+    heap->tld->recurse = false;
+  }
+}
+
+// Register (or clear, with fn==NULL) the deferred-free callback.
+// Store the argument before publishing the callback pointer: the original
+// order (fn first, then arg) left a window where a concurrent
+// `_mi_deferred_free` could invoke the new `fn` with the previous argument.
+// NOTE(review): `deferred_free` itself is a volatile (not atomic) pointer,
+// so this ordering is best-effort on weakly-ordered architectures.
+void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept {
+  mi_atomic_store_ptr_release(void,&deferred_arg, arg);
+  deferred_free = fn;
+}
+
+
+/* -----------------------------------------------------------
+ General allocation
+----------------------------------------------------------- */
+
+// Large and huge page allocation.
+// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`).
+// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
+// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
+// Returns a freshly allocated page with an immediately available block,
+// or NULL when the segment allocation fails.
+static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
+  size_t block_size = _mi_os_good_alloc_size(size);
+  mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
+  bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
+  #if MI_HUGE_PAGE_ABANDON
+  mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
+  #else
+  // no abandonment: huge pages live in the dedicated huge queue
+  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_LARGE_OBJ_SIZE_MAX+1 : block_size);
+  mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
+  #endif
+  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
+  if (page != NULL) {
+    mi_assert_internal(mi_page_immediate_available(page));
+
+    if (is_huge) {
+      mi_assert_internal(mi_page_is_huge(page));
+      mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
+      mi_assert_internal(_mi_page_segment(page)->used==1);
+      #if MI_HUGE_PAGE_ABANDON
+      mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
+      mi_page_set_heap(page, NULL);
+      #endif
+    }
+    else {
+      mi_assert_internal(!mi_page_is_huge(page));
+    }
+
+    // update the huge-allocation statistics
+    const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding
+    /*if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+      mi_heap_stat_increase(heap, malloc_large, bsize);
+      mi_heap_stat_counter_increase(heap, malloc_large_count, 1);
+    }
+    else */
+    {
+      _mi_stat_increase(&heap->tld->stats.malloc_huge, bsize);
+      _mi_stat_counter_increase(&heap->tld->stats.malloc_huge_count, 1);
+    }
+  }
+  return page;
+}
+
+
+// Allocate a page
+// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
+// Dispatches to the large/huge path for big requests or explicit huge
+// alignments, and to the size-segregated queues otherwise; rejects requests
+// above MI_MAX_ALLOC_SIZE with EOVERFLOW.
+static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
+  // huge allocation?
+  const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
+  if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
+    if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) {
+      _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
+      return NULL;
+    }
+    else {
+      return mi_large_huge_page_alloc(heap,size,huge_alignment);
+    }
+  }
+  else {
+    // otherwise find a page with free blocks in our size segregated queues
+    #if MI_PADDING
+    mi_assert_internal(size >= MI_PADDING_SIZE);
+    #endif
+    return mi_find_free_page(heap, size);
+  }
+}
+
+// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
+// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
+// The `huge_alignment` is normally 0 but is set to a multiple of MI_SLICE_SIZE for
+// very large requested alignments in which case we use a huge singleton page.
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment, size_t* usable) mi_attr_noexcept
+{
+  mi_assert_internal(heap != NULL);
+
+  // initialize if necessary
+  if mi_unlikely(!mi_heap_is_initialized(heap)) {
+    heap = mi_heap_get_default(); // calls mi_thread_init
+    if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
+  }
+  mi_assert_internal(mi_heap_is_initialized(heap));
+
+  // do administrative tasks every N generic mallocs
+  if mi_unlikely(++heap->generic_count >= 100) {
+    heap->generic_collect_count += heap->generic_count;
+    heap->generic_count = 0;
+    // call potential deferred free routines
+    _mi_deferred_free(heap, false);
+
+    // free delayed frees from other threads (but skip contended ones)
+    _mi_heap_delayed_free_partial(heap);
+
+    // collect every once in a while (10000 by default)
+    const long generic_collect = mi_option_get_clamp(mi_option_generic_collect, 1, 1000000L);
+    if (heap->generic_collect_count >= generic_collect) {
+      heap->generic_collect_count = 0;
+      mi_heap_collect(heap, false /* force? */);
+    }
+  }
+
+  // find (or allocate) a page of the right size
+  mi_page_t* page = mi_find_page(heap, size, huge_alignment);
+  if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
+    mi_heap_collect(heap, true /* force */);
+    page = mi_find_page(heap, size, huge_alignment);
+  }
+
+  if mi_unlikely(page == NULL) { // out of memory
+    const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
+    _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
+    return NULL;
+  }
+
+  mi_assert_internal(mi_page_immediate_available(page));
+  mi_assert_internal(mi_page_block_size(page) >= size);
+
+  // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
+  void* const p = _mi_page_malloc_zero(heap, page, size, zero, usable);
+  mi_assert_internal(p != NULL);
+
+  // move singleton pages to the full queue
+  if (page->reserved == page->used) {
+    mi_page_to_full(page, mi_page_queue_of(page));
+  }
+  return p;
+}
diff --git a/compat/mimalloc/prim/osx/prim.c b/compat/mimalloc/prim/osx/prim.c
new file mode 100644
index 00000000000000..8a2f4e8aa47316
--- /dev/null
+++ b/compat/mimalloc/prim/osx/prim.c
@@ -0,0 +1,9 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// We use the unix/prim.c with the mmap API on macOSX
+#include "../unix/prim.c"
diff --git a/compat/mimalloc/prim/prim.c b/compat/mimalloc/prim/prim.c
new file mode 100644
index 00000000000000..5147bae81feaaf
--- /dev/null
+++ b/compat/mimalloc/prim/prim.c
@@ -0,0 +1,76 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// Select the implementation of the primitives
+// depending on the OS.
+
+#if defined(_WIN32)
+#include "windows/prim.c" // VirtualAlloc (Windows)
+
+#elif defined(__APPLE__)
+#include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c)
+
+#elif defined(__wasi__)
+#define MI_USE_SBRK
+#include "wasi/prim.c" // memory-grow or sbrk (Wasm)
+
+#elif defined(__EMSCRIPTEN__)
+#include "emscripten/prim.c" // emmalloc_*, + pthread support
+
+#else
+#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.)
+
+#endif
+
+// Generic process initialization
+// Hooks `_mi_auto_process_init`/`_mi_auto_process_done` into process
+// start/exit when the platform primitive layer did not already provide a
+// mechanism (MI_PRIM_HAS_PROCESS_ATTACH).
+#ifndef MI_PRIM_HAS_PROCESS_ATTACH
+#if defined(__GNUC__) || defined(__clang__)
+  // gcc,clang: use the constructor/destructor attribute
+  // which for both seem to run before regular constructors/destructors
+  #if defined(__clang__)
+    // clang: priority 101 runs these before/after default-priority ctors/dtors
+    #define mi_attr_constructor __attribute__((constructor(101)))
+    #define mi_attr_destructor  __attribute__((destructor(101)))
+  #else
+    #define mi_attr_constructor __attribute__((constructor))
+    #define mi_attr_destructor  __attribute__((destructor))
+  #endif
+  static void mi_attr_constructor mi_process_attach(void) {
+    _mi_auto_process_init();
+  }
+  static void mi_attr_destructor mi_process_detach(void) {
+    _mi_auto_process_done();
+  }
+#elif defined(__cplusplus)
+  // C++: use static initialization to detect process start/end
+  // This is not guaranteed to be first/last but the best we can generally do?
+  struct mi_init_done_t {
+    mi_init_done_t() {
+      _mi_auto_process_init();
+    }
+    ~mi_init_done_t() {
+      _mi_auto_process_done();
+    }
+  };
+  static mi_init_done_t mi_init_done;
+#else
+  #pragma message("define a way to call _mi_auto_process_init/done on your platform")
+#endif
+#endif
+
+// Generic allocator init/done callback
+#ifndef MI_PRIM_HAS_ALLOCATOR_INIT
+// Generic fallback: no redirection support on this platform, always false.
+// (presumably overridden by platform layers that define
+// MI_PRIM_HAS_ALLOCATOR_INIT — TODO confirm)
+bool _mi_is_redirected(void) {
+  return false;
+}
+// Generic allocator-init hook: reports success with no diagnostic message.
+bool _mi_allocator_init(const char** message) {
+  if (message != NULL) { *message = NULL; }
+  return true;
+}
+// Generic allocator-shutdown hook.
+void _mi_allocator_done(void) {
+  // nothing to do
+}
+#endif
diff --git a/compat/mimalloc/prim/unix/prim.c b/compat/mimalloc/prim/unix/prim.c
new file mode 100644
index 00000000000000..99331e3f8215c1
--- /dev/null
+++ b/compat/mimalloc/prim/unix/prim.c
@@ -0,0 +1,962 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// This file is included in `src/prim/prim.c`
+
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE // ensure mmap flags and syscall are defined
+#endif
+
+#if defined(__sun)
+// illumos provides new mman.h api when any of these are defined
+// otherwise the old api based on caddr_t which predates the void pointers one.
+// stock solaris provides only the former, chose to atomically to discard those
+// flags only here rather than project wide tough.
+#undef _XOPEN_SOURCE
+#undef _POSIX_C_SOURCE
+#endif
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"
+
+#include <sys/mman.h>  // mmap
+#include <unistd.h>    // sysconf
+#include <fcntl.h>     // open, close, read, access
+#include <stdlib.h>    // getenv, arc4random_buf
+
+#if defined(__linux__)
+  #include <features.h>
+  #include <sys/prctl.h>    // THP disable, PR_SET_VMA
+  #include <sys/sysinfo.h>  // sysinfo
+  #if defined(__GLIBC__) && !defined(PR_SET_VMA)
+  #include <linux/prctl.h>
+  #endif
+  #if defined(__GLIBC__)
+  #include <linux/mman.h>   // linux mmap flags
+  #else
+  #include <sys/mman.h>
+  #endif
+#elif defined(__APPLE__)
+  #include <AvailabilityMacros.h>
+  #include <TargetConditionals.h>
+  #if !defined(TARGET_OS_OSX) || TARGET_OS_OSX // see issue #879, used to be (!TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR)
+  #include <mach/vm_statistics.h>  // VM_MAKE_TAG, VM_FLAGS_SUPERPAGE_SIZE_2MB, etc.
+  #endif
+  #if !defined(MAC_OS_X_VERSION_10_7)
+  #define MAC_OS_X_VERSION_10_7 1070
+  #endif
+  #include <sys/sysctl.h>
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
+  #include <sys/param.h>
+  #if __FreeBSD_version >= 1200000
+  #include <sys/cpuset.h>
+  #include <sys/domainset.h>
+  #endif
+  #include <sys/sysctl.h>
+#endif
+
+#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__FreeBSD__)
+  #define MI_HAS_SYSCALL_H
+  #include <sys/syscall.h>
+#endif
+
+#if !defined(MADV_DONTNEED) && defined(POSIX_MADV_DONTNEED) // QNX
+#define MADV_DONTNEED POSIX_MADV_DONTNEED
+#endif
+#if !defined(MADV_FREE) && defined(POSIX_MADV_FREE) // QNX
+#define MADV_FREE POSIX_MADV_FREE
+#endif
+
+#define MI_UNIX_LARGE_PAGE_SIZE (2*MI_MiB) // TODO: can we query the OS for this?
+
+//------------------------------------------------------------------------------------
+// Use syscalls for some primitives to allow for libraries that override open/read/close etc.
+// and do allocation themselves; using syscalls prevents recursion when mimalloc is
+// still initializing (issue #713)
+// Declare inline to avoid unused function warnings.
+//------------------------------------------------------------------------------------
+
+#if defined(MI_HAS_SYSCALL_H) && defined(SYS_open) && defined(SYS_close) && defined(SYS_read) && defined(SYS_access)
+
+// Raw-syscall variants: bypass libc so interposed open/read/close/access
+// (which may allocate) cannot recurse into mimalloc during initialization.
+static inline int mi_prim_open(const char* fpath, int open_flags) {
+  return syscall(SYS_open,fpath,open_flags,0);
+}
+static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) {
+  return syscall(SYS_read,fd,buf,bufsize);
+}
+static inline int mi_prim_close(int fd) {
+  return syscall(SYS_close,fd);
+}
+static inline int mi_prim_access(const char *fpath, int mode) {
+  return syscall(SYS_access,fpath,mode);
+}
+
+#else
+
+// Fallback: plain libc wrappers for platforms without the needed syscalls.
+static inline int mi_prim_open(const char* fpath, int open_flags) {
+  return open(fpath,open_flags);
+}
+static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) {
+  return read(fd,buf,bufsize);
+}
+static inline int mi_prim_close(int fd) {
+  return close(fd);
+}
+static inline int mi_prim_access(const char *fpath, int mode) {
+  return access(fpath,mode);
+}
+
+#endif
+
+
+
+//---------------------------------------------
+// init
+//---------------------------------------------
+
+// Detect whether the OS overcommits memory; defaults to true when
+// detection is unsupported or fails.
+static bool unix_detect_overcommit(void) {
+  bool os_overcommit = true;
+  #if defined(__linux__)
+  int fd = mi_prim_open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+  if (fd >= 0) {
+    char buf[32];
+    ssize_t nread = mi_prim_read(fd, &buf, sizeof(buf));
+    mi_prim_close(fd);
+    //
+    // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE)
+    if (nread >= 1) {
+      os_overcommit = (buf[0] == '0' || buf[0] == '1');
+    }
+  }
+  #elif defined(__FreeBSD__)
+  int val = 0;
+  size_t olen = sizeof(val);
+  if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) {
+    os_overcommit = (val != 0);
+  }
+  #else
+  // default: overcommit is true
+  #endif
+  return os_overcommit;
+}
+
+// try to detect the physical memory dynamically (if possible)
+// On success stores the amount in KiB into `*physical_memory_in_kib`;
+// leaves it untouched on failure or when no method is available.
+static void unix_detect_physical_memory( size_t page_size, size_t* physical_memory_in_kib ) {
+  #if defined(CTL_HW) && (defined(HW_PHYSMEM64) || defined(HW_MEMSIZE)) // freeBSD, macOS
+  MI_UNUSED(page_size);
+  int64_t physical_memory = 0;
+  size_t length = sizeof(int64_t);
+  #if defined(HW_PHYSMEM64)
+  int mib[2] = { CTL_HW, HW_PHYSMEM64 };
+  #else
+  int mib[2] = { CTL_HW, HW_MEMSIZE };
+  #endif
+  const int err = sysctl(mib, 2, &physical_memory, &length, NULL, 0);
+  if (err==0 && physical_memory > 0) {
+    // guard against overflow when narrowing to size_t
+    const int64_t phys_in_kib = physical_memory / MI_KiB;
+    if (phys_in_kib > 0 && (uint64_t)phys_in_kib <= SIZE_MAX) {
+      *physical_memory_in_kib = (size_t)phys_in_kib;
+    }
+  }
+  #elif defined(__linux__)
+  MI_UNUSED(page_size);
+  struct sysinfo info; _mi_memzero_var(info);
+  const int err = sysinfo(&info);
+  if (err==0 && info.totalram > 0 && info.totalram <= SIZE_MAX) {
+    *physical_memory_in_kib = (size_t)info.totalram / MI_KiB;
+  }
+  #elif defined(_SC_PHYS_PAGES) // do not use by default as it might cause allocation (by using `fopen` to parse /proc/meminfo) (issue #1100)
+  const long pphys = sysconf(_SC_PHYS_PAGES);
+  const size_t psize_in_kib = page_size / MI_KiB;
+  if (psize_in_kib > 0 && pphys > 0 && (unsigned long)pphys <= SIZE_MAX && (size_t)pphys <= (SIZE_MAX/psize_in_kib)) {
+    *physical_memory_in_kib = (size_t)pphys * psize_in_kib;
+  }
+  #endif
+}
+
+// Fill in the OS memory configuration: page size/granularity, physical
+// memory, overcommit, and capability flags; optionally disables
+// transparent huge pages for this process on Linux.
+void _mi_prim_mem_init( mi_os_mem_config_t* config )
+{
+  long psize = sysconf(_SC_PAGESIZE);
+  if (psize > 0 && (unsigned long)psize < SIZE_MAX) {
+    config->page_size = (size_t)psize;
+    config->alloc_granularity = (size_t)psize;
+    unix_detect_physical_memory(config->page_size, &config->physical_memory_in_kib);
+  }
+  config->large_page_size = MI_UNIX_LARGE_PAGE_SIZE;
+  config->has_overcommit = unix_detect_overcommit();
+  config->has_partial_free = true; // mmap can free in parts
+  config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE)
+
+  // disable transparent huge pages for this process?
+  #if (defined(__linux__) || defined(__ANDROID__)) && defined(PR_GET_THP_DISABLE)
+  #if defined(MI_NO_THP)
+  if (true)
+  #else
+  if (!mi_option_is_enabled(mi_option_allow_thp)) // disable THP if requested through an option
+  #endif
+  {
+    int val = 0;
+    if (prctl(PR_GET_THP_DISABLE, &val, 0, 0, 0) != 0) {
+      // Most likely since distros often come with always/madvise settings.
+      val = 1;
+      // Disabling only for mimalloc process rather than touching system wide settings
+      (void)prctl(PR_SET_THP_DISABLE, &val, 0, 0, 0);
+    }
+  }
+  #endif
+}
+
+
+//---------------------------------------------
+// free
+//---------------------------------------------
+
+// Unmap `size` bytes at `addr`. Freeing zero bytes is a successful no-op.
+// Returns 0 on success, or the `errno` value from a failed `munmap`.
+int _mi_prim_free(void* addr, size_t size ) {
+ if (size==0) return 0;
+ if (munmap(addr, size) != 0) {
+ return errno;
+ }
+ return 0;
+}
+
+
+//---------------------------------------------
+// mmap
+//---------------------------------------------
+
+// Portable wrapper around `madvise`: returns 0 on success, `errno` on failure.
+static int unix_madvise(void* addr, size_t size, int advice) {
+ int result;
+ #if defined(__sun)
+ result = madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520)
+ #elif defined(__QNX__)
+ result = posix_madvise(addr, size, advice);
+ #else
+ result = madvise(addr, size, advice);
+ #endif
+ return (result!=0 ? errno : 0);
+}
+
+// Thin wrapper over `mmap` (always offset 0). On Linux the anonymous mapping
+// is labeled "mimalloc" (PR_SET_VMA_ANON_NAME) so it can be identified in
+// /proc/<pid>/maps.
+static void* unix_mmap_prim(void* addr, size_t size, int protect_flags, int flags, int fd) {
+ void* const base = mmap(addr, size, protect_flags, flags, fd, 0 /* offset */);
+ #if defined(__linux__) && defined(PR_SET_VMA)
+ if (base!=NULL && base!=MAP_FAILED) {
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, base, size, "mimalloc");
+ }
+ #endif
+ return base;
+}
+
+// mmap with a best-effort alignment request: first try platform-specific
+// aligned-mmap flags (BSD MAP_ALIGNED, Solaris MAP_ALIGN) or an aligned
+// address hint (64-bit systems), then fall back to a plain mmap.
+// The returned pointer is NOT guaranteed to be aligned; returns NULL on failure.
+static void* unix_mmap_prim_aligned(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
+ MI_UNUSED(try_alignment);
+ void* p = NULL;
+ #if defined(MAP_ALIGNED) // BSD
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
+ size_t n = mi_bsr(try_alignment);
+ if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
+ p = unix_mmap_prim(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd);
+ if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+ int err = errno;
+ _mi_trace_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
+ }
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ }
+ #elif defined(MAP_ALIGN) // Solaris
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
+ p = unix_mmap_prim((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd); // addr parameter is the required alignment
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ #endif
+ #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
+ // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations
+ if (addr == NULL) {
+ void* hint = _mi_os_get_aligned_hint(try_alignment, size);
+ if (hint != NULL) {
+ p = unix_mmap_prim(hint, size, protect_flags, flags, fd);
+ if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+ #if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly?
+ int err = 0;
+ #else
+ int err = errno;
+ #endif
+ _mi_trace_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
+ }
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ }
+ #endif
+ // regular mmap (no alignment request)
+ p = unix_mmap_prim(addr, size, protect_flags, flags, fd);
+ if (p!=MAP_FAILED) return p;
+ // failed to allocate
+ return NULL;
+}
+
+// The `fd` to pass to an anonymous mmap: -1 normally, or a VM tag on macOS
+// so mimalloc's mappings can be identified by tools like vmmap.
+static int unix_mmap_fd(void) {
+ #if defined(VM_MAKE_TAG)
+ // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99)
+ int tag = (int)mi_option_get(mi_option_os_tag);
+ if (tag > 255 || tag < 100) { tag = 254; } // clamp out-of-range tags to the default
+ return VM_MAKE_TAG(tag);
+ #else
+ return -1;
+ #endif
+}
+
+// Allocate `size` bytes with mmap, optionally trying (explicit) large OS pages.
+// - `large_only`: only attempt a large/huge page mapping (used for 1GiB pages).
+// - `allow_large`: large pages may be tried when enabled via options.
+// - `*is_large` is set to whether large-page mapping flags were used.
+// Returns NULL on failure.
+static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
+ #if !defined(MAP_ANONYMOUS)
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+ #if !defined(MAP_NORESERVE)
+ #define MAP_NORESERVE 0
+ #endif
+ void* p = NULL;
+ const int fd = unix_mmap_fd();
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ if (_mi_os_has_overcommit()) {
+ flags |= MAP_NORESERVE;
+ }
+ #if defined(PROT_MAX)
+ protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
+ #endif
+ // huge page allocation
+ if (allow_large && (large_only || (_mi_os_canuse_large_page(size, try_alignment) && mi_option_is_enabled(mi_option_allow_large_os_pages)))) {
+ static _Atomic(size_t) large_page_try_ok; // = 0;
+ size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
+ if (!large_only && try_ok > 0) {
+ // If the OS is not configured for large OS pages, or the user does not have
+ // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
+ // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times
+ // to avoid too many failing calls to mmap.
+ mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
+ }
+ else {
+ int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux
+ int lfd = fd;
+ #ifdef MAP_ALIGNED_SUPER
+ lflags |= MAP_ALIGNED_SUPER;
+ #endif
+ #ifdef MAP_HUGETLB
+ lflags |= MAP_HUGETLB;
+ #endif
+ #ifdef MAP_HUGE_1GB
+ static bool mi_huge_pages_available = true;
+ if (large_only && (size % MI_GiB) == 0 && mi_huge_pages_available) {
+ lflags |= MAP_HUGE_1GB;
+ }
+ else
+ #endif
+ {
+ #ifdef MAP_HUGE_2MB
+ lflags |= MAP_HUGE_2MB;
+ #endif
+ }
+ #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
+ lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
+ #endif
+ if (large_only || lflags != flags) {
+ // try large OS page allocation
+ *is_large = true;
+ p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd);
+ #ifdef MAP_HUGE_1GB
+ if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) {
+ mi_huge_pages_available = false; // don't try huge 1GiB pages again
+ if (large_only) {
+ _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno);
+ }
+ lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
+ p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd);
+ }
+ #endif
+ if (large_only) return p;
+ if (p == NULL) {
+ mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations
+ }
+ }
+ }
+ }
+ // regular allocation (also the fallback when large pages failed)
+ if (p == NULL) {
+ *is_large = false;
+ p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, flags, fd);
+ #if !defined(MI_NO_THP)
+ if (p != NULL && allow_large && mi_option_is_enabled(mi_option_allow_thp) && _mi_os_canuse_large_page(size, try_alignment)) {
+ #if defined(MADV_HUGEPAGE)
+ // Many Linux systems don't allow MAP_HUGETLB but they support instead
+ // transparent huge pages (THP). Generally, it is not required to call `madvise` with MADV_HUGE
+ // though since properly aligned allocations will already use large pages if available
+ // in that case -- in particular for our large regions (in `memory.c`).
+ // However, some systems only allow THP if called with explicit `madvise`, so
+ // when large OS pages are enabled for mimalloc, we call `madvise` anyways.
+ if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) {
+ // *is_large = true; // possibly
+ };
+ #elif defined(__sun)
+ struct memcntl_mha cmd = {0};
+ cmd.mha_pagesize = _mi_os_large_page_size();
+ cmd.mha_cmd = MHA_MAPSIZE_VA;
+ if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
+ // *is_large = true; // possibly
+ }
+ #endif
+ }
+ #endif
+ }
+ return p;
+}
+
+// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+// Allocate `size` bytes (a multiple of the OS page size) near `hint_addr`.
+// - `commit`: map read/write; otherwise only reserve (PROT_NONE).
+// - `allow_large`/`is_large`: see `unix_mmap`.
+// - `*is_zero` is set to true since fresh anonymous mmap memory is zeroed.
+// Returns 0 on success, or `errno` from the failed mmap.
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(commit || !allow_large);
+ mi_assert_internal(try_alignment > 0);
+ if (hint_addr == NULL && size >= 8*MI_UNIX_LARGE_PAGE_SIZE && try_alignment > 1 && _mi_is_power_of_two(try_alignment) && try_alignment < MI_UNIX_LARGE_PAGE_SIZE) {
+ try_alignment = MI_UNIX_LARGE_PAGE_SIZE; // try to align along large page size for larger allocations
+ }
+
+ *is_zero = true;
+ int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
+ *addr = unix_mmap(hint_addr, size, try_alignment, protect_flags, false, allow_large, is_large);
+ return (*addr != NULL ? 0 : errno);
+}
+
+
+//---------------------------------------------
+// Commit/Reset
+//---------------------------------------------
+
+// Print a hint when `mprotect` fails with ENOMEM in secure mode: with a guard
+// page around every mimalloc page, the kernel's per-process mapping limit
+// (vm.max_map_count on Linux) can be exhausted.
+static void unix_mprotect_hint(int err) {
+ #if defined(__linux__) && (MI_SECURE>=2) // guard page around every mimalloc page
+ if (err == ENOMEM) {
+ _mi_warning_message("The next warning may be caused by a low memory map limit.\n"
+ " On Linux this is controlled by the vm.max_map_count -- maybe increase it?\n"
+ " For example: sudo sysctl -w vm.max_map_count=262144\n");
+ }
+ #else
+ MI_UNUSED(err);
+ #endif
+}
+
+// Commit a previously reserved range by making it readable and writable.
+// `*is_zero` is set to false: the range may be partially committed already
+// and `mprotect` does not zero existing contents.
+// Returns 0 on success or the `errno` from a failed mprotect.
+int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
+ *is_zero = false;
+ if (mprotect(start, size, (PROT_READ | PROT_WRITE)) == 0) {
+ return 0;
+ }
+ const int err = errno;
+ unix_mprotect_hint(err);
+ return err;
+}
+
+// Mark a range as in-use again. On macOS this pairs a prior
+// MADV_FREE_REUSABLE (see `_mi_prim_decommit`) with MADV_FREE_REUSE so the
+// rss accounting is restored; a no-op (returning 0) on other platforms.
+int _mi_prim_reuse(void* start, size_t size) {
+ MI_UNUSED(start); MI_UNUSED(size);
+ #if defined(__APPLE__) && defined(MADV_FREE_REUSE)
+ return unix_madvise(start, size, MADV_FREE_REUSE);
+ #endif
+ return 0;
+}
+
+// Decommit a range: the address space stays reserved but the contents may be
+// discarded and rss should drop. `*needs_recommit` tells the caller whether
+// `_mi_prim_commit` must run before the range is touched again (always true
+// in debug/secure builds, where the range is also protected with PROT_NONE).
+int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
+ int err = 0;
+ #if defined(__APPLE__) && defined(MADV_FREE_REUSABLE)
+ // decommit on macOS: use MADV_FREE_REUSABLE as it does immediate rss accounting (issue #1097)
+ err = unix_madvise(start, size, MADV_FREE_REUSABLE);
+ if (err) { err = unix_madvise(start, size, MADV_DONTNEED); }
+ #else
+ // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
+ err = unix_madvise(start, size, MADV_DONTNEED);
+ #endif
+ #if !MI_DEBUG && MI_SECURE<=2
+ *needs_recommit = false;
+ #else
+ *needs_recommit = true;
+ mprotect(start, size, PROT_NONE);
+ #endif
+ /*
+ // decommit: use mmap with MAP_FIXED and PROT_NONE to discard the existing memory (and reduce rss)
+ *needs_recommit = true;
+ const int fd = unix_mmap_fd();
+ void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
+ if (p != start) { err = errno; }
+ */
+ return err;
+}
+
+// Reset a range: tell the OS its contents are no longer needed while keeping
+// it committed and accessible. Prefers MADV_FREE; on the first EINVAL it
+// atomically switches to MADV_DONTNEED for all future calls.
+int _mi_prim_reset(void* start, size_t size) {
+ int err = 0;
+
+ // on macOS can use MADV_FREE_REUSABLE (but we disable this for now as it seems slower)
+ #if 0 && defined(__APPLE__) && defined(MADV_FREE_REUSABLE)
+ err = unix_madvise(start, size, MADV_FREE_REUSABLE);
+ if (err==0) return 0;
+ // fall through
+ #endif
+
+ #if defined(MADV_FREE)
+ // Otherwise, we try to use `MADV_FREE` as that is the fastest. A drawback though is that it
+ // will not reduce the `rss` stats in tools like `top` even though the memory is available
+ // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by
+ // default `MADV_DONTNEED` is used though.
+ static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
+ int oadvice = (int)mi_atomic_load_relaxed(&advice);
+ // retry while the advise call is interrupted with EAGAIN
+ while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; };
+ if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
+ // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
+ mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED);
+ err = unix_madvise(start, size, MADV_DONTNEED);
+ }
+ #else
+ err = unix_madvise(start, size, MADV_DONTNEED);
+ #endif
+ return err;
+}
+
+// Toggle protection on a range: PROT_NONE when `protect` is set, otherwise
+// restore read/write. Returns 0 on success or `errno` from a failed mprotect.
+int _mi_prim_protect(void* start, size_t size, bool protect) {
+ const int prot = (protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
+ int err = 0;
+ if (mprotect(start, size, prot) != 0) { err = errno; }
+ unix_mprotect_hint(err); // may warn about a low vm.max_map_count in secure mode
+ return err;
+}
+
+
+
+//---------------------------------------------
+// Huge page allocation
+//---------------------------------------------
+
+#if (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__) && !defined(__CYGWIN__)
+
+#ifndef MPOL_PREFERRED
+#define MPOL_PREFERRED 1
+#endif
+
+// Direct wrapper for the Linux `mbind` syscall (sets the NUMA memory policy
+// for a range); compiled as a no-op returning 0 when the syscall is unavailable.
+#if defined(MI_HAS_SYSCALL_H) && defined(SYS_mbind)
+static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
+ return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags);
+}
+#else
+static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
+ MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags);
+ return 0;
+}
+#endif
+
+// Allocate huge (1GiB) OS pages near `hint_addr` and, when `numa_node` is in
+// range (0 <= node < 8*MI_INTPTR_SIZE), bind the range to that node via mbind.
+// A failed bind only produces a warning; returns 0 on success or `errno` when
+// the allocation itself failed.
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) {
+ bool is_large = true;
+ *is_zero = true;
+ *addr = unix_mmap(hint_addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large);
+ if (*addr != NULL && numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes
+ unsigned long numa_mask = (1UL << numa_node);
+ // TODO: does `mbind` work correctly for huge OS pages? should we
+ // use `set_mempolicy` before calling mmap instead?
+ // see: <https://lkml.org/lkml/2017/2/9/875>
+ long err = mi_prim_mbind(*addr, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
+ if (err != 0) {
+ err = errno;
+ _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err);
+ }
+ }
+ return (*addr != NULL ? 0 : errno);
+}
+
+#else
+
+// Huge (1GiB) OS page allocation is not supported on this platform:
+// always fail with ENOMEM.
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) {
+ MI_UNUSED(hint_addr);
+ MI_UNUSED(size);
+ MI_UNUSED(numa_node);
+ *addr = NULL;
+ *is_zero = false;
+ return ENOMEM;
+}
+
+#endif
+
+//---------------------------------------------
+// NUMA nodes
+//---------------------------------------------
+
+#if defined(__linux__)
+
+// Current NUMA node of the executing CPU via the `getcpu` syscall,
+// or 0 when the syscall is unavailable or fails.
+size_t _mi_prim_numa_node(void) {
+ #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getcpu)
+ unsigned long node = 0;
+ unsigned long ncpu = 0;
+ long err = syscall(SYS_getcpu, &ncpu, &node, NULL);
+ if (err != 0) return 0;
+ return node;
+ #else
+ return 0;
+ #endif
+}
+
+// Count NUMA nodes by probing /sys/devices/system/node/node<N> entries with
+// `access` only (no allocation). Probing starts at node1, so at least 1 is
+// returned (node0 is assumed to exist).
+size_t _mi_prim_numa_node_count(void) {
+ char buf[128];
+ unsigned node = 0;
+ for(node = 0; node < 256; node++) {
+ // enumerate node entries -- todo: is there a more efficient way to do this? (but ensure there is no allocation)
+ _mi_snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1);
+ if (mi_prim_access(buf,R_OK) != 0) break;
+ }
+ return (node+1);
+}
+
+#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000
+
+// FreeBSD: query this process's memory-domain set and return the first
+// domain it may use; 0 when the query fails or no domain is set.
+size_t _mi_prim_numa_node(void) {
+ domainset_t dom;
+ size_t node;
+ int policy;
+ if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul;
+ for (node = 0; node < MAXMEMDOM; node++) {
+ if (DOMAINSET_ISSET(node, &dom)) return node;
+ }
+ return 0ul;
+}
+
+// FreeBSD: number of memory domains from the `vm.ndomains` sysctl,
+// or 0 when the query fails.
+size_t _mi_prim_numa_node_count(void) {
+ size_t domain_count = 0;
+ size_t out_size = sizeof(domain_count);
+ const int rc = sysctlbyname("vm.ndomains", &domain_count, &out_size, NULL, 0);
+ return (rc == -1 ? 0ul : domain_count);
+}
+
+#elif defined(__DragonFly__)
+
+// DragonFly: always report node 0.
+size_t _mi_prim_numa_node(void) {
+ // TODO: DragonFly does not seem to provide any userland means to get this information.
+ return 0ul;
+}
+
+// DragonFly: approximate a node count from CPU-topology sysctls.
+// NOTE(review): `hw.cpu_topology_ht_ids * hw.ncpu` looks like a heuristic
+// rather than an exact memory-domain count -- verify against the consumer.
+size_t _mi_prim_numa_node_count(void) {
+ size_t ncpus = 0, nvirtcoresperphys = 0;
+ size_t len = sizeof(size_t);
+ if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul;
+ if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul;
+ return nvirtcoresperphys * ncpus;
+}
+
+#else
+
+// Fallback: always node 0.
+size_t _mi_prim_numa_node(void) {
+ return 0;
+}
+
+// Fallback: report a single NUMA node.
+size_t _mi_prim_numa_node_count(void) {
+ return 1;
+}
+
+#endif
+
+// ----------------------------------------------------------------
+// Clock
+// ----------------------------------------------------------------
+
+#include <time.h>
+
+#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)
+
+// Current time in milliseconds, preferring CLOCK_MONOTONIC (not affected by
+// wall-clock adjustments) and falling back to CLOCK_REALTIME.
+mi_msecs_t _mi_prim_clock_now(void) {
+ struct timespec t;
+ #ifdef CLOCK_MONOTONIC
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ #else
+ clock_gettime(CLOCK_REALTIME, &t);
+ #endif
+ return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000);
+}
+
+#else
+
+// Low-resolution fallback: scale `clock()` ticks to milliseconds.
+// NOTE(review): `clock()` commonly measures processor time rather than wall
+// time -- acceptable here only as a coarse timer; confirm for the target libc.
+mi_msecs_t _mi_prim_clock_now(void) {
+ #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
+ return (mi_msecs_t)clock();
+ #elif (CLOCKS_PER_SEC < 1000)
+ return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
+ #else
+ return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
+ #endif
+}
+
+#endif
+
+
+
+
+//----------------------------------------------------------------
+// Process info
+//----------------------------------------------------------------
+
+#if defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__)
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/resource.h>
+
+#if defined(__APPLE__)
+#include <mach/mach.h>
+#endif
+
+#if defined(__HAIKU__)
+#include <kernel/OS.h>
+#endif
+
+// Convert a `struct timeval` to whole milliseconds.
+static mi_msecs_t timeval_secs(const struct timeval* tv) {
+ const mi_msecs_t msecs_from_sec = (mi_msecs_t)tv->tv_sec * 1000L;
+ const mi_msecs_t msecs_from_usec = (mi_msecs_t)tv->tv_usec / 1000L;
+ return msecs_from_sec + msecs_from_usec;
+}
+
+// Fill `pinfo` with process statistics: user/system time, peak/current rss,
+// and major page-fault count, via getrusage plus platform-specific APIs.
+// Fields that cannot be determined keep their caller-provided defaults.
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ struct rusage rusage;
+ getrusage(RUSAGE_SELF, &rusage);
+ pinfo->utime = timeval_secs(&rusage.ru_utime);
+ pinfo->stime = timeval_secs(&rusage.ru_stime);
+#if !defined(__HAIKU__)
+ pinfo->page_faults = rusage.ru_majflt;
+#endif
+#if defined(__HAIKU__)
+ // Haiku does not have (yet?) a way to
+ // get these stats per process
+ // so sum the sizes of all areas of this team as the peak rss
+ thread_info tid;
+ area_info mem;
+ ssize_t c;
+ get_thread_info(find_thread(0), &tid);
+ while (get_next_area_info(tid.team, &c, &mem) == B_OK) {
+ pinfo->peak_rss += mem.ram_size;
+ }
+ pinfo->page_faults = 0;
+#elif defined(__APPLE__)
+ pinfo->peak_rss = rusage.ru_maxrss; // macos reports in bytes
+ #ifdef MACH_TASK_BASIC_INFO
+ struct mach_task_basic_info info;
+ mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT;
+ if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) {
+ pinfo->current_rss = (size_t)info.resident_size;
+ }
+ #else
+ // older SDKs only provide the basic (non-mach) task info
+ struct task_basic_info info;
+ mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT;
+ if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) {
+ pinfo->current_rss = (size_t)info.resident_size;
+ }
+ #endif
+#else
+ pinfo->peak_rss = rusage.ru_maxrss * 1024; // Linux/BSD report in KiB
+#endif
+ // use defaults for commit
+}
+
+#else
+
+#ifndef __wasi__
+// WebAssembly instances are not processes
+#pragma message("define a way to get process info")
+#endif
+
+// No way to obtain process info on this platform (e.g. WASI):
+// leave the caller's defaults untouched.
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ // use defaults
+ MI_UNUSED(pinfo);
+}
+
+#endif
+
+
+//----------------------------------------------------------------
+// Output
+//----------------------------------------------------------------
+
+// Default output primitive: write `msg` to stderr.
+void _mi_prim_out_stderr( const char* msg ) {
+ fputs(msg,stderr);
+}
+
+
+//----------------------------------------------------------------
+// Environment
+//----------------------------------------------------------------
+
+#if !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0)
+// On Posix systems use `environ` to access environment variables
+// even before the C runtime is initialized.
+#if defined(__APPLE__) && defined(__has_include) && __has_include(<crt_externs.h>)
+#include <crt_externs.h>
+// macOS: access the environment through _NSGetEnviron() (crt_externs.h).
+static char** mi_get_environ(void) {
+ return (*_NSGetEnviron());
+}
+#else
+extern char** environ;
+// Other Unix systems: use the global `environ` array directly.
+static char** mi_get_environ(void) {
+ return environ;
+}
+#endif
+// Case-insensitive lookup of `name` in the raw `environ` array, so it is safe
+// to use even before the C runtime is fully initialized. On a match, the
+// value (the text after '=') is copied into `result` (truncated to
+// `result_size`). Returns true iff the variable was found.
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ if (name==NULL) return false;
+ const size_t name_len = _mi_strlen(name);
+ if (name_len == 0) return false;
+ char** entries = mi_get_environ();
+ if (entries == NULL) return false;
+ // scan at most 10000 entries as a safety bound
+ for (int idx = 0; idx < 10000; idx++) {
+ const char* entry = entries[idx];
+ if (entry == NULL) break;
+ if (_mi_strnicmp(name, entry, name_len) != 0) continue; // case insensitive
+ if (entry[name_len] != '=') continue; // require an exact key, not just a prefix
+ _mi_strlcpy(result, entry + name_len + 1, result_size);
+ return true;
+ }
+ return false;
+}
+#else
+// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime
+// Looks up `name` as-is first and then its upper-cased form (max 64 chars);
+// copies the value into `result`. Returns false when not found, when still
+// preloading, or when the value does not fit in `result_size`.
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ // cannot call getenv() when still initializing the C runtime.
+ if (_mi_preloading()) return false;
+ const char* s = getenv(name);
+ if (s == NULL) {
+ // we check the upper case name too.
+ char buf[64+1];
+ size_t len = _mi_strnlen(name,sizeof(buf)-1);
+ for (size_t i = 0; i < len; i++) {
+ buf[i] = _mi_toupper(name[i]);
+ }
+ buf[len] = 0;
+ s = getenv(buf);
+ }
+ if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false;
+ _mi_strlcpy(result, s, result_size);
+ return true;
+}
+#endif // !MI_USE_ENVIRON
+
+
+//----------------------------------------------------------------
+// Random
+//----------------------------------------------------------------
+
+#if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_15) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_15)
+#include <CommonCrypto/CommonCryptoError.h>
+#include <CommonCrypto/CommonRandom.h>
+
+// Fill `buf` with `buf_len` random bytes; returns true on success.
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ // We prefer CCRandomGenerateBytes as it returns an error code while arc4random_buf
+ // may fail silently on macOS. See PR #390, and
+ // <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
+ return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
+}
+
+#elif defined(__ANDROID__) || defined(__DragonFly__) || \
+ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
+ defined(__sun) || \
+ (defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7))
+
+// Fill `buf` with `buf_len` random bytes via arc4random_buf (always succeeds).
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ arc4random_buf(buf, buf_len);
+ return true;
+}
+
+#elif defined(__APPLE__) || defined(__linux__) || defined(__HAIKU__) // also for old apple versions < 10.7 (issue #829)
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+// Fill `buf` with `buf_len` random bytes: try the `getrandom` syscall first
+// and fall back (permanently, via an atomic flag) to reading /dev/urandom.
+// Returns true iff `buf` was completely filled.
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ // Modern Linux provides `getrandom` but different distributions either use `sys/random.h` or `linux/random.h`
+ // and for the latter the actual `getrandom` call is not always defined.
+ // (see <https://stackoverflow.com/questions/45237324/why-doesnt-getrandom-compile>)
+ // We therefore use a syscall directly and fall back dynamically to /dev/urandom when needed.
+ #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getrandom)
+ #ifndef GRND_NONBLOCK
+ #define GRND_NONBLOCK (1)
+ #endif
+ static _Atomic(uintptr_t) no_getrandom; // = 0
+ if (mi_atomic_load_acquire(&no_getrandom)==0) {
+ ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK);
+ if (ret >= 0) return (buf_len == (size_t)ret);
+ if (errno != ENOSYS) return false;
+ mi_atomic_store_release(&no_getrandom, (uintptr_t)1); // don't call again, and fall back to /dev/urandom
+ }
+ #endif
+ int flags = O_RDONLY;
+ #if defined(O_CLOEXEC)
+ flags |= O_CLOEXEC;
+ #endif
+ int fd = mi_prim_open("/dev/urandom", flags);
+ if (fd < 0) return false;
+ size_t count = 0;
+ // read in a loop; partial reads and EINTR/EAGAIN are retried
+ while(count < buf_len) {
+ ssize_t ret = mi_prim_read(fd, (char*)buf + count, buf_len - count);
+ if (ret<=0) {
+ if (errno!=EAGAIN && errno!=EINTR) break;
+ }
+ else {
+ count += ret;
+ }
+ }
+ mi_prim_close(fd);
+ return (count==buf_len);
+}
+
+#else
+
+// Fallback when no OS random source is available: report failure so the
+// caller can use a weaker seed instead.
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ MI_UNUSED(buf); MI_UNUSED(buf_len); // silence unused-parameter warnings (file convention)
+ return false;
+}
+
+#endif
+
+
+//----------------------------------------------------------------
+// Thread init/done
+//----------------------------------------------------------------
+
+#if defined(MI_USE_PTHREADS)
+
+// use pthread local storage keys to detect thread ending
+// (and used with MI_TLS_PTHREADS for the default heap)
+pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1);
+
+// pthread TLS destructor: invoked at thread exit with the value stored under
+// `_mi_heap_default_key`; forwards the thread's heap to `_mi_thread_done`.
+static void mi_pthread_done(void* value) {
+ mi_heap_t* const heap = (mi_heap_t*)value;
+ if (heap != NULL) {
+ _mi_thread_done(heap);
+ }
+}
+
+// Create the TLS key whose destructor (`mi_pthread_done`) runs at thread exit.
+void _mi_prim_thread_init_auto_done(void) {
+ mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1));
+ pthread_key_create(&_mi_heap_default_key, &mi_pthread_done);
+}
+
+// Delete the TLS key at process shutdown so it is not leaked.
+void _mi_prim_thread_done_auto_done(void) {
+ if (_mi_heap_default_key != (pthread_key_t)(-1)) { // do not leak the key, see issue #809
+ pthread_key_delete(_mi_heap_default_key);
+ }
+}
+
+// Store the thread's default heap in the TLS slot so `mi_pthread_done`
+// receives it when the thread exits.
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD
+ pthread_setspecific(_mi_heap_default_key, heap);
+ }
+}
+
+#else
+
+// Without pthreads there is no per-thread cleanup hook:
+// all three primitives are no-ops.
+void _mi_prim_thread_init_auto_done(void) {
+ // nothing
+}
+
+void _mi_prim_thread_done_auto_done(void) {
+ // nothing
+}
+
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+}
+
+#endif
diff --git a/compat/mimalloc/prim/windows/prim.c b/compat/mimalloc/prim/windows/prim.c
new file mode 100644
index 00000000000000..75a93d2a7277de
--- /dev/null
+++ b/compat/mimalloc/prim/windows/prim.c
@@ -0,0 +1,879 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// This file is included in `src/prim/prim.c`
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"
+#include <stdio.h> // fputs, stderr
+
+// xbox has no console IO
+#if !defined(WINAPI_FAMILY_PARTITION) || WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
+#define MI_HAS_CONSOLE_IO
+#endif
+
+//---------------------------------------------
+// Dynamically bind Windows API points for portability
+//---------------------------------------------
+
+// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016.
+// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility)
+// NtAllocateVirtualAllocEx is used for huge OS page allocation (1GiB)
+// We define a minimal MEM_EXTENDED_PARAMETER ourselves in order to be able to compile with older SDK's.
+typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E {
+ MiMemExtendedParameterInvalidType = 0,
+ MiMemExtendedParameterAddressRequirements,
+ MiMemExtendedParameterNumaNode,
+ MiMemExtendedParameterPartitionHandle,
+ MiMemExtendedParameterUserPhysicalHandle,
+ MiMemExtendedParameterAttributeFlags,
+ MiMemExtendedParameterMax
+} MI_MEM_EXTENDED_PARAMETER_TYPE;
+
+typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S {
+ struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type;
+ union { DWORD64 ULong64; PVOID Pointer; SIZE_T Size; HANDLE Handle; DWORD ULong; } Arg;
+} MI_MEM_EXTENDED_PARAMETER;
+
+typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S {
+ PVOID LowestStartingAddress;
+ PVOID HighestEndingAddress;
+ SIZE_T Alignment;
+} MI_MEM_ADDRESS_REQUIREMENTS;
+
+#define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010
+
+#include <winternl.h>
+typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG);
+typedef LONG (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); // avoid NTSTATUS as it is not defined on xbox (pr #1084)
+static PVirtualAlloc2 pVirtualAlloc2 = NULL;
+static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
+
+// Similarly, GetNumaProcessorNodeEx is only supported since Windows 7 (and GetNumaNodeProcessorMask is not supported on xbox)
+typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER;
+
+typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber);
+typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber);
+typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask);
+typedef BOOL (__stdcall *PGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber);
+typedef BOOL (__stdcall* PGetNumaNodeProcessorMask)(UCHAR Node, PULONGLONG ProcessorMask);
+typedef BOOL (__stdcall* PGetNumaHighestNodeNumber)(PULONG Node);
+static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL;
+static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL;
+static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL;
+static PGetNumaProcessorNode pGetNumaProcessorNode = NULL;
+static PGetNumaNodeProcessorMask pGetNumaNodeProcessorMask = NULL;
+static PGetNumaHighestNodeNumber pGetNumaHighestNodeNumber = NULL;
+
+// Not available on xbox
+typedef SIZE_T(__stdcall* PGetLargePageMinimum)(VOID);
+static PGetLargePageMinimum pGetLargePageMinimum = NULL;
+
+// Available after Windows XP
+typedef BOOL (__stdcall *PGetPhysicallyInstalledSystemMemory)( PULONGLONG TotalMemoryInKilobytes );
+
+//---------------------------------------------
+// Enable large page support dynamically (if possible)
+//---------------------------------------------
+
+static bool win_enable_large_os_pages(size_t* large_page_size)
+{
+ static bool large_initialized = false;
+ if (large_initialized) return (_mi_os_large_page_size() > 0);
+ large_initialized = true;
+ if (pGetLargePageMinimum==NULL) return false; // no large page support (xbox etc.)
+
+ // Try to see if large OS pages are supported
+ // To use large pages on Windows, we first need access permission
+ // Set "Lock pages in memory" permission in the group policy editor
+ //
+ unsigned long err = 0;
+ HANDLE token = NULL;
+ BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
+ if (ok) {
+ TOKEN_PRIVILEGES tp;
+ ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid);
+ if (ok) {
+ tp.PrivilegeCount = 1;
+ tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+ ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
+ if (ok) {
+ err = GetLastError();
+ ok = (err == ERROR_SUCCESS);
+ if (ok && large_page_size != NULL && pGetLargePageMinimum != NULL) {
+ *large_page_size = (*pGetLargePageMinimum)();
+ }
+ }
+ }
+ CloseHandle(token);
+ }
+ if (!ok) {
+ if (err == 0) err = GetLastError();
+ _mi_warning_message("cannot enable large OS page support, error %lu\n", err);
+ }
+ return (ok!=0);
+}
+
+
+//---------------------------------------------
+// Initialize
+//---------------------------------------------
+
+void _mi_prim_mem_init( mi_os_mem_config_t* config )
+{
+ config->has_overcommit = false;
+ config->has_partial_free = false;
+ config->has_virtual_reserve = true;
+ // get the page size
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ if (si.dwPageSize > 0) { config->page_size = si.dwPageSize; }
+ if (si.dwAllocationGranularity > 0) { config->alloc_granularity = si.dwAllocationGranularity; }
+ // get virtual address bits
+ if ((uintptr_t)si.lpMaximumApplicationAddress > 0) {
+ const size_t vbits = MI_SIZE_BITS - mi_clz((uintptr_t)si.lpMaximumApplicationAddress);
+ config->virtual_address_bits = vbits;
+ }
+
+ // get the VirtualAlloc2 function
+ HINSTANCE hDll;
+ hDll = LoadLibrary(TEXT("kernelbase.dll"));
+ if (hDll != NULL) {
+ // use VirtualAlloc2FromApp if possible as it is available to Windows store apps
+ pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp");
+ if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2");
+ FreeLibrary(hDll);
+ }
+ // NtAllocateVirtualMemoryEx is used for huge page allocation
+ hDll = LoadLibrary(TEXT("ntdll.dll"));
+ if (hDll != NULL) {
+ pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
+ FreeLibrary(hDll);
+ }
+ // Try to use Win7+ numa API
+ hDll = LoadLibrary(TEXT("kernel32.dll"));
+ if (hDll != NULL) {
+ pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx");
+ pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx");
+ pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx");
+ pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode");
+ pGetNumaNodeProcessorMask = (PGetNumaNodeProcessorMask)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMask");
+ pGetNumaHighestNodeNumber = (PGetNumaHighestNodeNumber)(void (*)(void))GetProcAddress(hDll, "GetNumaHighestNodeNumber");
+ pGetLargePageMinimum = (PGetLargePageMinimum)(void (*)(void))GetProcAddress(hDll, "GetLargePageMinimum");
+ // Get physical memory (not available on XP, so check dynamically)
+ PGetPhysicallyInstalledSystemMemory pGetPhysicallyInstalledSystemMemory = (PGetPhysicallyInstalledSystemMemory)(void (*)(void))GetProcAddress(hDll,"GetPhysicallyInstalledSystemMemory");
+ if (pGetPhysicallyInstalledSystemMemory != NULL) {
+ ULONGLONG memInKiB = 0;
+ if ((*pGetPhysicallyInstalledSystemMemory)(&memInKiB)) {
+ if (memInKiB > 0 && memInKiB <= SIZE_MAX) {
+ config->physical_memory_in_kib = (size_t)memInKiB;
+ }
+ }
+ }
+ FreeLibrary(hDll);
+ }
+ // Enable large/huge OS page support?
+ if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
+ win_enable_large_os_pages(&config->large_page_size);
+ }
+}
+
+
+//---------------------------------------------
+// Free
+//---------------------------------------------
+
+int _mi_prim_free(void* addr, size_t size ) {
+ MI_UNUSED(size);
+ DWORD errcode = 0;
+ bool err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
+ if (err) { errcode = GetLastError(); }
+ if (errcode == ERROR_INVALID_ADDRESS) {
+ // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside
+ // the memory region returned by VirtualAlloc; in that case we need to free using
+ // the start of the region.
+ MEMORY_BASIC_INFORMATION info; _mi_memzero_var(info);
+ VirtualQuery(addr, &info, sizeof(info));
+ if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) {
+ errcode = 0;
+ err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0);
+ if (err) { errcode = GetLastError(); }
+ }
+ }
+ return (int)errcode;
+}
+
+
+//---------------------------------------------
+// VirtualAlloc
+//---------------------------------------------
+
+static void* win_virtual_alloc_prim_once(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+ #if (MI_INTPTR_SIZE >= 8)
+ // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations
+ if (addr == NULL) {
+ void* hint = _mi_os_get_aligned_hint(try_alignment,size);
+ if (hint != NULL) {
+ void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
+ if (p != NULL) return p;
+ _mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags);
+ // fall through on error
+ }
+ }
+ #endif
+ // on modern Windows try use VirtualAlloc2 for aligned allocation
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
+ MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
+ reqs.Alignment = try_alignment;
+ MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
+ param.Type.Type = MiMemExtendedParameterAddressRequirements;
+ param.Arg.Pointer = &reqs;
+ void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
+ if (p != NULL) return p;
+ _mi_warning_message("unable to allocate aligned OS memory (0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
+ // fall through on error
+ }
+ // last resort
+ return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
+}
+
+static bool win_is_out_of_memory_error(DWORD err) {
+ switch (err) {
+ case ERROR_COMMITMENT_MINIMUM:
+ case ERROR_COMMITMENT_LIMIT:
+ case ERROR_PAGEFILE_QUOTA:
+ case ERROR_NOT_ENOUGH_MEMORY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+ long max_retry_msecs = mi_option_get_clamp(mi_option_retry_on_oom, 0, 2000); // at most 2 seconds
+ if (max_retry_msecs == 1) { max_retry_msecs = 100; } // if one sets the option to "true"
+ for (long tries = 1; tries <= 10; tries++) { // try at most 10 times (=2200ms)
+ void* p = win_virtual_alloc_prim_once(addr, size, try_alignment, flags);
+ if (p != NULL) {
+ // success, return the address
+ return p;
+ }
+ else if (max_retry_msecs > 0 && (try_alignment <= 2*MI_SEGMENT_ALIGN) &&
+ (flags&MEM_COMMIT) != 0 && (flags&MEM_LARGE_PAGES) == 0 &&
+ win_is_out_of_memory_error(GetLastError())) {
+ // if committing regular memory and being out-of-memory,
+ // keep trying for a bit in case memory frees up after all. See issue #894
+ _mi_warning_message("out-of-memory on OS allocation, try again... (attempt %lu, 0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", tries, size, GetLastError(), addr, try_alignment, flags);
+ long sleep_msecs = tries*40; // increasing waits
+ if (sleep_msecs > max_retry_msecs) { sleep_msecs = max_retry_msecs; }
+ max_retry_msecs -= sleep_msecs;
+ Sleep(sleep_msecs);
+ }
+ else {
+ // otherwise return with an error
+ break;
+ }
+ }
+ return NULL;
+}
+
+static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
+ mi_assert_internal(!(large_only && !allow_large));
+ static _Atomic(size_t) large_page_try_ok; // = 0;
+ void* p = NULL;
+ // Try to allocate large OS pages (2MiB) if allowed or required.
+ if ((large_only || (_mi_os_canuse_large_page(size, try_alignment) && mi_option_is_enabled(mi_option_allow_large_os_pages)))
+ && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0)
+ {
+ size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
+ if (!large_only && try_ok > 0) {
+ // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
+ // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
+ mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
+ }
+ else {
+ // large OS pages must always reserve and commit.
+ *is_large = true;
+ p = win_virtual_alloc_prim(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
+ if (large_only) return p;
+ // fall back to non-large page allocation on error (`p == NULL`).
+ if (p == NULL) {
+ mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations
+ }
+ }
+ }
+ // Fall back to regular page allocation
+ if (p == NULL) {
+ *is_large = ((flags&MEM_LARGE_PAGES) != 0);
+ p = win_virtual_alloc_prim(addr, size, try_alignment, flags);
+ }
+ //if (p == NULL) { _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large); }
+ return p;
+}
+
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(commit || !allow_large);
+ mi_assert_internal(try_alignment > 0);
+ *is_zero = true;
+ int flags = MEM_RESERVE;
+ if (commit) { flags |= MEM_COMMIT; }
+ *addr = win_virtual_alloc(hint_addr, size, try_alignment, flags, false, allow_large, is_large);
+ return (*addr != NULL ? 0 : (int)GetLastError());
+}
+
+
+//---------------------------------------------
+// Commit/Reset/Protect
+//---------------------------------------------
+#ifdef _MSC_VER
+#pragma warning(disable:6250) // suppress warning calling VirtualFree without MEM_RELEASE (for decommit)
+#endif
+
+int _mi_prim_commit(void* addr, size_t size, bool* is_zero) {
+ *is_zero = false;
+ /*
+ // zero'ing only happens on an initial commit... but checking upfront seems expensive..
+ _MEMORY_BASIC_INFORMATION meminfo; _mi_memzero_var(meminfo);
+ if (VirtualQuery(addr, &meminfo, size) > 0) {
+ if ((meminfo.State & MEM_COMMIT) == 0) {
+ *is_zero = true;
+ }
+ }
+ */
+ // commit
+ void* p = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
+ if (p == NULL) return (int)GetLastError();
+ return 0;
+}
+
+int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) {
+ BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT);
+ *needs_recommit = true; // for safety, assume always decommitted even in the case of an error.
+ return (ok ? 0 : (int)GetLastError());
+}
+
+int _mi_prim_reset(void* addr, size_t size) {
+ void* p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+ mi_assert_internal(p == addr);
+ #if 0
+ if (p != NULL) {
+ VirtualUnlock(addr,size); // VirtualUnlock after MEM_RESET removes the memory directly from the working set
+ }
+ #endif
+ return (p != NULL ? 0 : (int)GetLastError());
+}
+
+int _mi_prim_reuse(void* addr, size_t size) {
+ MI_UNUSED(addr); MI_UNUSED(size);
+ return 0;
+}
+
+int _mi_prim_protect(void* addr, size_t size, bool protect) {
+ DWORD oldprotect = 0;
+ BOOL ok = VirtualProtect(addr, size, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect);
+ return (ok ? 0 : (int)GetLastError());
+}
+
+
+//---------------------------------------------
+// Huge page allocation
+//---------------------------------------------
+
+static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int numa_node)
+{
+ const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE;
+
+ win_enable_large_os_pages(NULL);
+
+ MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} };
+ // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages
+ static bool mi_huge_pages_available = true;
+ if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) {
+ params[0].Type.Type = MiMemExtendedParameterAttributeFlags;
+ params[0].Arg.ULong64 = MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
+ ULONG param_count = 1;
+ if (numa_node >= 0) {
+ param_count++;
+ params[1].Type.Type = MiMemExtendedParameterNumaNode;
+ params[1].Arg.ULong = (unsigned)numa_node;
+ }
+ SIZE_T psize = size;
+ void* base = hint_addr;
+ LONG err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count);
+ if (err == 0 && base != NULL) {
+ return base;
+ }
+ else {
+ // fall back to regular large pages
+ mi_huge_pages_available = false; // don't try further huge pages
+ _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err);
+ }
+ }
+ // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation
+ if (pVirtualAlloc2 != NULL && numa_node >= 0) {
+ params[0].Type.Type = MiMemExtendedParameterNumaNode;
+ params[0].Arg.ULong = (unsigned)numa_node;
+ return (*pVirtualAlloc2)(GetCurrentProcess(), hint_addr, size, flags, PAGE_READWRITE, params, 1);
+ }
+
+ // otherwise use regular virtual alloc on older windows
+ return VirtualAlloc(hint_addr, size, flags, PAGE_READWRITE);
+}
+
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) {
+ *is_zero = true;
+ *addr = _mi_prim_alloc_huge_os_pagesx(hint_addr,size,numa_node);
+ return (*addr != NULL ? 0 : (int)GetLastError());
+}
+
+
+//---------------------------------------------
+// Numa nodes
+//---------------------------------------------
+
+size_t _mi_prim_numa_node(void) {
+ USHORT numa_node = 0;
+ if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) {
+ // Extended API is supported
+ MI_PROCESSOR_NUMBER pnum;
+ (*pGetCurrentProcessorNumberEx)(&pnum);
+ USHORT nnode = 0;
+ BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode);
+ if (ok) { numa_node = nnode; }
+ }
+ else if (pGetNumaProcessorNode != NULL) {
+ // Vista or earlier, use older API that is limited to 64 processors. Issue #277
+ DWORD pnum = GetCurrentProcessorNumber();
+ UCHAR nnode = 0;
+ BOOL ok = pGetNumaProcessorNode((UCHAR)pnum, &nnode);
+ if (ok) { numa_node = nnode; }
+ }
+ return numa_node;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ ULONG numa_max = 0;
+ if (pGetNumaHighestNodeNumber!=NULL) {
+ (*pGetNumaHighestNodeNumber)(&numa_max);
+ }
+ // find the highest node number that has actual processors assigned to it. Issue #282
+ while (numa_max > 0) {
+ if (pGetNumaNodeProcessorMaskEx != NULL) {
+ // Extended API is supported
+ GROUP_AFFINITY affinity;
+ if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) {
+ if (affinity.Mask != 0) break; // found the maximum non-empty node
+ }
+ }
+ else {
+ // Vista or earlier, use older API that is limited to 64 processors.
+ ULONGLONG mask;
+ if (pGetNumaNodeProcessorMask != NULL) {
+ if ((*pGetNumaNodeProcessorMask)((UCHAR)numa_max, &mask)) {
+ if (mask != 0) break; // found the maximum non-empty node
+ }
+ };
+ }
+ // max node was invalid or had no processor assigned, try again
+ numa_max--;
+ }
+ return ((size_t)numa_max + 1);
+}
+
+
+//----------------------------------------------------------------
+// Clock
+//----------------------------------------------------------------
+
+static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) {
+ static LARGE_INTEGER mfreq; // = 0
+ if (mfreq.QuadPart == 0LL) {
+ LARGE_INTEGER f;
+ QueryPerformanceFrequency(&f);
+ mfreq.QuadPart = f.QuadPart/1000LL;
+ if (mfreq.QuadPart == 0) mfreq.QuadPart = 1;
+ }
+ return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart);
+}
+
+mi_msecs_t _mi_prim_clock_now(void) {
+ LARGE_INTEGER t;
+ QueryPerformanceCounter(&t);
+ return mi_to_msecs(t);
+}
+
+
+//----------------------------------------------------------------
+// Process Info
+//----------------------------------------------------------------
+
+#include <psapi.h>
+
+static mi_msecs_t filetime_msecs(const FILETIME* ftime) {
+ ULARGE_INTEGER i;
+ i.LowPart = ftime->dwLowDateTime;
+ i.HighPart = ftime->dwHighDateTime;
+ mi_msecs_t msecs = (i.QuadPart / 10000); // FILETIME is in 100 nano seconds
+ return msecs;
+}
+
+typedef BOOL (WINAPI *PGetProcessMemoryInfo)(HANDLE, PPROCESS_MEMORY_COUNTERS, DWORD);
+static PGetProcessMemoryInfo pGetProcessMemoryInfo = NULL;
+
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ FILETIME ct;
+ FILETIME ut;
+ FILETIME st;
+ FILETIME et;
+ GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut);
+ pinfo->utime = filetime_msecs(&ut);
+ pinfo->stime = filetime_msecs(&st);
+
+ // load psapi on demand
+ if (pGetProcessMemoryInfo == NULL) {
+ HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll"));
+ if (hDll != NULL) {
+ pGetProcessMemoryInfo = (PGetProcessMemoryInfo)(void (*)(void))GetProcAddress(hDll, "GetProcessMemoryInfo");
+ }
+ }
+
+ // get process info
+ PROCESS_MEMORY_COUNTERS info; _mi_memzero_var(info);
+ if (pGetProcessMemoryInfo != NULL) {
+ pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info));
+ }
+ pinfo->current_rss = (size_t)info.WorkingSetSize;
+ pinfo->peak_rss = (size_t)info.PeakWorkingSetSize;
+ pinfo->current_commit = (size_t)info.PagefileUsage;
+ pinfo->peak_commit = (size_t)info.PeakPagefileUsage;
+ pinfo->page_faults = (size_t)info.PageFaultCount;
+}
+
+//----------------------------------------------------------------
+// Output
+//----------------------------------------------------------------
+
+void _mi_prim_out_stderr( const char* msg )
+{
+ // on windows with redirection, the C runtime cannot handle locale dependent output
+ // after the main thread closes so we use direct console output.
+ if (!_mi_preloading()) {
+ // _cputs(msg); // _cputs cannot be used as it aborts when failing to lock the console
+ static HANDLE hcon = INVALID_HANDLE_VALUE;
+ static bool hconIsConsole = false;
+ if (hcon == INVALID_HANDLE_VALUE) {
+ hcon = GetStdHandle(STD_ERROR_HANDLE);
+ #ifdef MI_HAS_CONSOLE_IO
+ CONSOLE_SCREEN_BUFFER_INFO sbi;
+ hconIsConsole = ((hcon != INVALID_HANDLE_VALUE) && GetConsoleScreenBufferInfo(hcon, &sbi));
+ #endif
+ }
+ const size_t len = _mi_strlen(msg);
+ if (len > 0 && len < UINT32_MAX) {
+ DWORD written = 0;
+ if (hconIsConsole) {
+ #ifdef MI_HAS_CONSOLE_IO
+ WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL);
+ #endif
+ }
+ else if (hcon != INVALID_HANDLE_VALUE) {
+ // use direct write if stderr was redirected
+ WriteFile(hcon, msg, (DWORD)len, &written, NULL);
+ }
+ else {
+ // finally fall back to fputs after all
+ fputs(msg, stderr);
+ }
+ }
+ }
+}
+
+
+//----------------------------------------------------------------
+// Environment
+//----------------------------------------------------------------
+
+// On Windows use GetEnvironmentVariable instead of getenv to work
+// reliably even when this is invoked before the C runtime is initialized.
+// i.e. when `_mi_preloading() == true`.
+// Note: on windows, environment names are not case sensitive.
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ result[0] = 0;
+ size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size);
+ return (len > 0 && len < result_size);
+}
+
+
+//----------------------------------------------------------------
+// Random
+//----------------------------------------------------------------
+
+#if defined(MI_USE_RTLGENRANDOM) // || defined(__cplusplus)
+// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using
+// dynamic overriding, we observed it can raise an exception when compiled with C++, and
+// sometimes deadlocks when also running under the VS debugger.
+// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom.
+// To be continued..
+#pragma comment (lib,"advapi32.lib")
+#define RtlGenRandom SystemFunction036
+mi_decl_externc BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer, ULONG RandomBufferLength);
+
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ return (RtlGenRandom(buf, (ULONG)buf_len) != 0);
+}
+
+#else
+
+#ifndef BCRYPT_USE_SYSTEM_PREFERRED_RNG
+#define BCRYPT_USE_SYSTEM_PREFERRED_RNG 0x00000002
+#endif
+
+typedef LONG (NTAPI *PBCryptGenRandom)(HANDLE, PUCHAR, ULONG, ULONG);
+static PBCryptGenRandom pBCryptGenRandom = NULL;
+
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ if (pBCryptGenRandom == NULL) {
+ HINSTANCE hDll = LoadLibrary(TEXT("bcrypt.dll"));
+ if (hDll != NULL) {
+ pBCryptGenRandom = (PBCryptGenRandom)(void (*)(void))GetProcAddress(hDll, "BCryptGenRandom");
+ }
+ if (pBCryptGenRandom == NULL) return false;
+ }
+ return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0);
+}
+
+#endif // MI_USE_RTLGENRANDOM
+
+
+
+//----------------------------------------------------------------
+// Process & Thread Init/Done
+//----------------------------------------------------------------
+
+#if MI_WIN_USE_FIXED_TLS==1
+mi_decl_cache_align size_t _mi_win_tls_offset = 0;
+#endif
+
+//static void mi_debug_out(const char* s) {
+// HANDLE h = GetStdHandle(STD_ERROR_HANDLE);
+// WriteConsole(h, s, (DWORD)_mi_strlen(s), NULL, NULL);
+//}
+
+static void mi_win_tls_init(DWORD reason) {
+ if (reason==DLL_PROCESS_ATTACH || reason==DLL_THREAD_ATTACH) {
+ #if MI_WIN_USE_FIXED_TLS==1 // we must allocate a TLS slot dynamically
+ if (_mi_win_tls_offset == 0 && reason == DLL_PROCESS_ATTACH) {
+ const DWORD tls_slot = TlsAlloc(); // usually returns slot 1
+ if (tls_slot == TLS_OUT_OF_INDEXES) {
+ _mi_error_message(EFAULT, "unable to allocate the a TLS slot (rebuild without MI_WIN_USE_FIXED_TLS?)\n");
+ }
+ _mi_win_tls_offset = (size_t)tls_slot * sizeof(void*);
+ }
+ #endif
+ #if MI_HAS_TLS_SLOT >= 2 // we must initialize the TLS slot before any allocation
+ if (mi_prim_get_default_heap() == NULL) {
+ _mi_heap_set_default_direct((mi_heap_t*)&_mi_heap_empty);
+ #if MI_DEBUG && MI_WIN_USE_FIXED_TLS==1
+ void* const p = TlsGetValue((DWORD)(_mi_win_tls_offset / sizeof(void*)));
+ mi_assert_internal(p == (void*)&_mi_heap_empty);
+ #endif
+ }
+ #endif
+ }
+}
+
+static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) {
+ MI_UNUSED(reserved);
+ MI_UNUSED(module);
+ mi_win_tls_init(reason);
+ if (reason==DLL_PROCESS_ATTACH) {
+ _mi_auto_process_init();
+ }
+ else if (reason==DLL_PROCESS_DETACH) {
+ _mi_auto_process_done();
+ }
+ else if (reason==DLL_THREAD_DETACH && !_mi_is_redirected()) {
+ _mi_thread_done(NULL);
+ }
+}
+
+
+#if defined(MI_SHARED_LIB)
+ #define MI_PRIM_HAS_PROCESS_ATTACH 1
+
+ // Windows DLL: easy to hook into process_init and thread_done
+ BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
+ mi_win_main((PVOID)inst,reason,reserved);
+ return TRUE;
+ }
+
+ // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event.
+ void _mi_prim_thread_init_auto_done(void) { }
+ void _mi_prim_thread_done_auto_done(void) { }
+ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+ }
+
+#elif !defined(MI_WIN_USE_FLS)
+ #define MI_PRIM_HAS_PROCESS_ATTACH 1
+
+ static void NTAPI mi_win_main_attach(PVOID module, DWORD reason, LPVOID reserved) {
+ if (reason == DLL_PROCESS_ATTACH || reason == DLL_THREAD_ATTACH) {
+ mi_win_main(module, reason, reserved);
+ }
+ }
+ static void NTAPI mi_win_main_detach(PVOID module, DWORD reason, LPVOID reserved) {
+ if (reason == DLL_PROCESS_DETACH || reason == DLL_THREAD_DETACH) {
+ mi_win_main(module, reason, reserved);
+ }
+ }
+
+ // Set up TLS callbacks in a statically linked library by using special data sections.
+ // See <https://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way>
+ // We use 2 entries to ensure we call attach events before constructors
+ // are called, and detach events after destructors are called.
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ #if defined(_WIN64)
+ #pragma comment(linker, "/INCLUDE:_tls_used")
+ #pragma comment(linker, "/INCLUDE:_mi_tls_callback_pre")
+ #pragma comment(linker, "/INCLUDE:_mi_tls_callback_post")
+ #pragma const_seg(".CRT$XLB")
+ extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[];
+ const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach };
+ #pragma const_seg()
+ #pragma const_seg(".CRT$XLY")
+ extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[];
+ const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach };
+ #pragma const_seg()
+ #else
+ #pragma comment(linker, "/INCLUDE:__tls_used")
+ #pragma comment(linker, "/INCLUDE:__mi_tls_callback_pre")
+ #pragma comment(linker, "/INCLUDE:__mi_tls_callback_post")
+ #pragma data_seg(".CRT$XLB")
+ PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach };
+ #pragma data_seg()
+ #pragma data_seg(".CRT$XLY")
+ PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach };
+ #pragma data_seg()
+ #endif
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event.
+ void _mi_prim_thread_init_auto_done(void) { }
+ void _mi_prim_thread_done_auto_done(void) { }
+ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+ }
+
+#else // deprecated: statically linked, use fiber api
+
+ #if defined(_MSC_VER) // on clang/gcc use the constructor attribute (in `src/prim/prim.c`)
+ // MSVC: use data section magic for static libraries
+ // See <https://www.codeguru.com/cpp/misc/misc/applicationcontrol/article.php/c6945/Running-Code-Before-and-After-Main.htm>
+ #define MI_PRIM_HAS_PROCESS_ATTACH 1
+
+ static int mi_process_attach(void) {
+ mi_win_main(NULL,DLL_PROCESS_ATTACH,NULL);
+ atexit(&_mi_auto_process_done);
+ return 0;
+ }
+ typedef int(*mi_crt_callback_t)(void);
+ #if defined(_WIN64)
+ #pragma comment(linker, "/INCLUDE:_mi_tls_callback")
+ #pragma section(".CRT$XIU", long, read)
+ #else
+ #pragma comment(linker, "/INCLUDE:__mi_tls_callback")
+ #endif
+ #pragma data_seg(".CRT$XIU")
+ mi_decl_externc mi_crt_callback_t _mi_tls_callback[] = { &mi_process_attach };
+ #pragma data_seg()
+ #endif
+
+ // use the fiber api for calling `_mi_thread_done`.
+ #include <fibersapi.h>
+ #if (_WIN32_WINNT < 0x600) // before Windows Vista
+ WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
+ WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex );
+ WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
+ WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex);
+ #endif
+
+ static DWORD mi_fls_key = (DWORD)(-1);
+
+ static void NTAPI mi_fls_done(PVOID value) {
+ mi_heap_t* heap = (mi_heap_t*)value;
+ if (heap != NULL) {
+ _mi_thread_done(heap);
+ FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672
+ }
+ }
+
+ void _mi_prim_thread_init_auto_done(void) {
+ mi_fls_key = FlsAlloc(&mi_fls_done);
+ }
+
+ void _mi_prim_thread_done_auto_done(void) {
+ // call thread-done on all threads (except the main thread) to prevent
+ // dangling callback pointer if statically linked with a DLL; Issue #208
+ FlsFree(mi_fls_key);
+ }
+
+ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ mi_assert_internal(mi_fls_key != (DWORD)(-1));
+ FlsSetValue(mi_fls_key, heap);
+ }
+#endif
+
+// ----------------------------------------------------
+// Communicate with the redirection module on Windows
+// ----------------------------------------------------
+#if defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT)
+ #define MI_PRIM_HAS_ALLOCATOR_INIT 1
+
+ static bool mi_redirected = false; // true if malloc redirects to mi_malloc
+
+ bool _mi_is_redirected(void) {
+ return mi_redirected;
+ }
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+ mi_decl_export void _mi_redirect_entry(DWORD reason) {
+ // called on redirection; careful as this may be called before DllMain
+ mi_win_tls_init(reason);
+ if (reason == DLL_PROCESS_ATTACH) {
+ mi_redirected = true;
+ }
+ else if (reason == DLL_PROCESS_DETACH) {
+ mi_redirected = false;
+ }
+ else if (reason == DLL_THREAD_DETACH) {
+ _mi_thread_done(NULL);
+ }
+ }
+ __declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message);
+ __declspec(dllimport) void mi_cdecl mi_allocator_done(void);
+ #ifdef __cplusplus
+ }
+ #endif
+ bool _mi_allocator_init(const char** message) {
+ return mi_allocator_init(message);
+ }
+ void _mi_allocator_done(void) {
+ mi_allocator_done();
+ }
+#endif
diff --git a/compat/mimalloc/random.c b/compat/mimalloc/random.c
new file mode 100644
index 00000000000000..f17698ba8a6d08
--- /dev/null
+++ b/compat/mimalloc/random.c
@@ -0,0 +1,258 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h" // _mi_prim_random_buf
+#include <string.h> // memset
+
+/* ----------------------------------------------------------------------------
+We use our own PRNG to keep predictable performance of random number generation
+and to avoid implementations that use a lock. We only use the OS provided
+random source to initialize the initial seeds. Since we do not need ultimate
+performance but we do rely on the security (for secret cookies in secure mode)
+we use a cryptographically secure generator (chacha20).
+-----------------------------------------------------------------------------*/
+
+#define MI_CHACHA_ROUNDS (20) // perhaps use 12 for better performance?
+
+
+/* ----------------------------------------------------------------------------
+Chacha20 implementation as the original algorithm with a 64-bit nonce
+and counter: https://en.wikipedia.org/wiki/Salsa20
+The input matrix has sixteen 32-bit values:
+Position 0 to 3: constant key
+Position 4 to 11: the key
+Position 12 to 13: the counter.
+Position 14 to 15: the nonce.
+
+The implementation uses regular C code which compiles very well on modern compilers.
+(gcc x64 has no register spills, and clang 6+ uses SSE instructions)
+-----------------------------------------------------------------------------*/
+
+// Rotate `x` left by `shift` bits; callers pass 7/8/12/16 (0 < shift < 32).
+static inline uint32_t rotl(uint32_t x, uint32_t shift) {
+ return (x << shift) | (x >> (32 - shift));
+}
+
+// One ChaCha quarter round on state entries a, b, c, d (add / xor / rotate).
+static inline void qround(uint32_t x[16], size_t a, size_t b, size_t c, size_t d) {
+ x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 16);
+ x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 12);
+ x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 8);
+ x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 7);
+}
+
+// Produce one chacha block: run MI_CHACHA_ROUNDS rounds (column + diagonal
+// quarter rounds) over a copy of the input state, add the input back in,
+// store 16 fresh 32-bit output words, and advance the 64-bit block counter
+// (words 12/13, overflowing into nonce word 14).
+static void chacha_block(mi_random_ctx_t* ctx)
+{
+ // scramble into `x`
+ uint32_t x[16];
+ for (size_t i = 0; i < 16; i++) {
+ x[i] = ctx->input[i];
+ }
+ for (size_t i = 0; i < MI_CHACHA_ROUNDS; i += 2) {
+ qround(x, 0, 4, 8, 12);
+ qround(x, 1, 5, 9, 13);
+ qround(x, 2, 6, 10, 14);
+ qround(x, 3, 7, 11, 15);
+ qround(x, 0, 5, 10, 15);
+ qround(x, 1, 6, 11, 12);
+ qround(x, 2, 7, 8, 13);
+ qround(x, 3, 4, 9, 14);
+ }
+
+ // add scrambled data to the initial state
+ for (size_t i = 0; i < 16; i++) {
+ ctx->output[i] = x[i] + ctx->input[i];
+ }
+ ctx->output_available = 16;
+
+ // increment the counter for the next round
+ ctx->input[12] += 1;
+ if (ctx->input[12] == 0) {
+ ctx->input[13] += 1;
+ if (ctx->input[13] == 0) { // and keep increasing into the nonce
+ ctx->input[14] += 1;
+ }
+ }
+}
+
+// Hand out the next 32 random bits, generating a new block when the current
+// output is exhausted; each word is zeroed after use so it cannot be re-read.
+static uint32_t chacha_next32(mi_random_ctx_t* ctx) {
+ if (ctx->output_available <= 0) {
+ chacha_block(ctx);
+ ctx->output_available = 16; // (assign again to suppress static analysis warning)
+ }
+ const uint32_t x = ctx->output[16 - ctx->output_available];
+ ctx->output[16 - ctx->output_available] = 0; // reset once the data is handed out
+ ctx->output_available--;
+ return x;
+}
+
+// Read the idx32'th 32-bit word from byte buffer `p` as little-endian.
+static inline uint32_t read32(const uint8_t* p, size_t idx32) {
+ const size_t i = 4*idx32;
+ return ((uint32_t)p[i+0] | (uint32_t)p[i+1] << 8 | (uint32_t)p[i+2] << 16 | (uint32_t)p[i+3] << 24);
+}
+
+// Initialize the chacha state: words 0-3 hold the "expand 32-byte k"
+// constant, 4-11 the 32-byte key, 12-13 the counter (starts at 0),
+// and 14-15 the 64-bit nonce.
+static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t nonce)
+{
+ // since we only use chacha for randomness (and not encryption) we
+ // do not _need_ to read 32-bit values as little endian but we do anyways
+ // just for being compatible :-)
+ memset(ctx, 0, sizeof(*ctx));
+ for (size_t i = 0; i < 4; i++) {
+ const uint8_t* sigma = (uint8_t*)"expand 32-byte k";
+ ctx->input[i] = read32(sigma,i);
+ }
+ for (size_t i = 0; i < 8; i++) {
+ ctx->input[i + 4] = read32(key,i);
+ }
+ ctx->input[12] = 0;
+ ctx->input[13] = 0;
+ ctx->input[14] = (uint32_t)nonce;
+ ctx->input[15] = (uint32_t)(nonce >> 32);
+}
+
+// Derive a new context from `ctx`: same key/constants, fresh `nonce`,
+// counter reset to zero; one block is generated immediately to mix state.
+static void chacha_split(mi_random_ctx_t* ctx, uint64_t nonce, mi_random_ctx_t* ctx_new) {
+ memset(ctx_new, 0, sizeof(*ctx_new));
+ _mi_memcpy(ctx_new->input, ctx->input, sizeof(ctx_new->input));
+ ctx_new->input[12] = 0;
+ ctx_new->input[13] = 0;
+ ctx_new->input[14] = (uint32_t)nonce;
+ ctx_new->input[15] = (uint32_t)(nonce >> 32);
+ mi_assert_internal(ctx->input[14] != ctx_new->input[14] || ctx->input[15] != ctx_new->input[15]); // do not reuse nonces!
+ chacha_block(ctx_new);
+}
+
+
+/* ----------------------------------------------------------------------------
+Random interface
+-----------------------------------------------------------------------------*/
+
+#if MI_DEBUG>1
+// Debug helper: a context counts as initialized once the constant key has
+// been written (input[0] is never 0 after chacha_init).
+static bool mi_random_is_initialized(mi_random_ctx_t* ctx) {
+ return (ctx != NULL && ctx->input[0] != 0);
+}
+#endif
+
+// Split off an independent random context into `ctx_new`, using the
+// address of `ctx_new` as the (unique) nonce.
+void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* ctx_new) {
+ mi_assert_internal(mi_random_is_initialized(ctx));
+ mi_assert_internal(ctx != ctx_new);
+ chacha_split(ctx, (uintptr_t)ctx_new /*nonce*/, ctx_new);
+}
+
+// Return the next random uintptr_t, never 0 (loops until non-zero);
+// on 64-bit platforms two 32-bit draws are combined.
+uintptr_t _mi_random_next(mi_random_ctx_t* ctx) {
+ mi_assert_internal(mi_random_is_initialized(ctx));
+ uintptr_t r;
+ do {
+ #if MI_INTPTR_SIZE <= 4
+ r = chacha_next32(ctx);
+ #elif MI_INTPTR_SIZE == 8
+ r = (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx));
+ #else
+ # error "define mi_random_next for this platform"
+ #endif
+ } while (r==0);
+ return r;
+}
+
+
+/* ----------------------------------------------------------------------------
+To initialize a fresh random context.
+If we cannot get good randomness, we fall back to weak randomness based on a timer and ASLR.
+-----------------------------------------------------------------------------*/
+
+// Weak fallback entropy: mixes this function's address (randomized by ASLR),
+// the current clock, and a data-dependent number of shuffle rounds.
+// Guaranteed to return a non-zero value.
+uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
+ uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random
+ x ^= _mi_prim_clock_now();
+ // and do a few randomization steps
+ uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
+ for (uintptr_t i = 0; i < max || x==0; i++, x++) {
+ x = _mi_random_shuffle(x);
+ }
+ mi_assert_internal(x != 0);
+ return x;
+}
+
+// Initialize `ctx`: key from the OS secure source via `_mi_prim_random_buf`,
+// or — when `use_weak` is set or the OS source fails — from the weak
+// time/ASLR generator. The source used is recorded in `ctx->weak`.
+static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
+ uint8_t key[32];
+ if (use_weak || !_mi_prim_random_buf(key, sizeof(key))) {
+ // if we fail to get random data from the OS, we fall back to a
+ // weak random source based on the current time
+ #if !defined(__wasi__)
+ if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); }
+ #endif
+ uintptr_t x = _mi_os_random_weak(0);
+ for (size_t i = 0; i < 8; i++, x++) { // key is eight 32-bit words.
+ x = _mi_random_shuffle(x);
+ ((uint32_t*)key)[i] = (uint32_t)x;
+ }
+ ctx->weak = true;
+ }
+ else {
+ ctx->weak = false;
+ }
+ chacha_init(ctx, key, (uintptr_t)ctx /*nonce*/ );
+}
+
+// Initialize with the secure OS source (falls back to weak only on failure).
+void _mi_random_init(mi_random_ctx_t* ctx) {
+ mi_random_init_ex(ctx, false);
+}
+
+// Initialize using only the weak time/ASLR source (no OS randomness call).
+void _mi_random_init_weak(mi_random_ctx_t * ctx) {
+ mi_random_init_ex(ctx, true);
+}
+
+// Upgrade a weakly-seeded context to a securely-seeded one; no-op otherwise.
+void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx) {
+ if (ctx->weak) {
+ _mi_random_init(ctx);
+ }
+}
+
+/* --------------------------------------------------------
+test vectors from <https://tools.ietf.org/html/rfc8439>
+----------------------------------------------------------- */
+/*
+static bool array_equals(uint32_t* x, uint32_t* y, size_t n) {
+ for (size_t i = 0; i < n; i++) {
+ if (x[i] != y[i]) return false;
+ }
+ return true;
+}
+static void chacha_test(void)
+{
+ uint32_t x[4] = { 0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567 };
+ uint32_t x_out[4] = { 0xea2a92f4, 0xcb1cf8ce, 0x4581472e, 0x5881c4bb };
+ qround(x, 0, 1, 2, 3);
+ mi_assert_internal(array_equals(x, x_out, 4));
+
+ uint32_t y[16] = {
+ 0x879531e0, 0xc5ecf37d, 0x516461b1, 0xc9a62f8a,
+ 0x44c20ef3, 0x3390af7f, 0xd9fc690b, 0x2a5f714c,
+ 0x53372767, 0xb00a5631, 0x974c541a, 0x359e9963,
+ 0x5c971061, 0x3d631689, 0x2098d9d6, 0x91dbd320 };
+ uint32_t y_out[16] = {
+ 0x879531e0, 0xc5ecf37d, 0xbdb886dc, 0xc9a62f8a,
+ 0x44c20ef3, 0x3390af7f, 0xd9fc690b, 0xcfacafd2,
+ 0xe46bea80, 0xb00a5631, 0x974c541a, 0x359e9963,
+ 0x5c971061, 0xccc07c79, 0x2098d9d6, 0x91dbd320 };
+ qround(y, 2, 7, 8, 13);
+ mi_assert_internal(array_equals(y, y_out, 16));
+
+ mi_random_ctx_t r = {
+ { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
+ 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c,
+ 0x13121110, 0x17161514, 0x1b1a1918, 0x1f1e1d1c,
+ 0x00000001, 0x09000000, 0x4a000000, 0x00000000 },
+ {0},
+ 0
+ };
+ uint32_t r_out[16] = {
+ 0xe4e7f110, 0x15593bd1, 0x1fdd0f50, 0xc47120a3,
+ 0xc7f4d1c7, 0x0368c033, 0x9aaa2204, 0x4e6cd4c3,
+ 0x466482d2, 0x09aa9f07, 0x05d7c214, 0xa2028bd9,
+ 0xd19c12b5, 0xb94e16de, 0xe883d0cb, 0x4e3c50a2 };
+ chacha_block(&r);
+ mi_assert_internal(array_equals(r.output, r_out, 16));
+}
+*/
diff --git a/compat/mimalloc/segment-map.c b/compat/mimalloc/segment-map.c
new file mode 100644
index 00000000000000..bbcea28aabc2e1
--- /dev/null
+++ b/compat/mimalloc/segment-map.c
@@ -0,0 +1,142 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* -----------------------------------------------------------
+ The following functions are to reliably find the segment or
+ block that encompasses any pointer p (or NULL if it is not
+ in any of our segments).
+ We maintain a bitmap of all memory with 1 bit per MI_SEGMENT_SIZE (64MiB)
+ set to 1 if it contains the segment meta data.
+----------------------------------------------------------- */
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+
+// Reduce total address space to reduce .bss (due to the `mi_segment_map`)
+#if (MI_INTPTR_SIZE > 4) && MI_TRACK_ASAN
+#define MI_SEGMENT_MAP_MAX_ADDRESS (128*1024ULL*MI_GiB) // 128 TiB (see issue #881)
+#elif (MI_INTPTR_SIZE > 4)
+#define MI_SEGMENT_MAP_MAX_ADDRESS (48*1024ULL*MI_GiB) // 48 TiB
+#else
+#define MI_SEGMENT_MAP_MAX_ADDRESS (UINT32_MAX)
+#endif
+
+#define MI_SEGMENT_MAP_PART_SIZE (MI_INTPTR_SIZE*MI_KiB - 128) // 128 > sizeof(mi_memid_t) !
+#define MI_SEGMENT_MAP_PART_BITS (8*MI_SEGMENT_MAP_PART_SIZE)
+#define MI_SEGMENT_MAP_PART_ENTRIES (MI_SEGMENT_MAP_PART_SIZE / MI_INTPTR_SIZE)
+#define MI_SEGMENT_MAP_PART_BIT_SPAN (MI_SEGMENT_ALIGN) // memory area covered by 1 bit
+
+#if (MI_SEGMENT_MAP_PART_BITS < (MI_SEGMENT_MAP_MAX_ADDRESS / MI_SEGMENT_MAP_PART_BIT_SPAN)) // prevent overflow on 32-bit (issue #1017)
+#define MI_SEGMENT_MAP_PART_SPAN (MI_SEGMENT_MAP_PART_BITS * MI_SEGMENT_MAP_PART_BIT_SPAN)
+#else
+#define MI_SEGMENT_MAP_PART_SPAN MI_SEGMENT_MAP_MAX_ADDRESS
+#endif
+
+#define MI_SEGMENT_MAP_MAX_PARTS ((MI_SEGMENT_MAP_MAX_ADDRESS / MI_SEGMENT_MAP_PART_SPAN) + 1)
+
+// A part of the segment map.
+typedef struct mi_segmap_part_s {
+ mi_memid_t memid;
+ _Atomic(uintptr_t) map[MI_SEGMENT_MAP_PART_ENTRIES];
+} mi_segmap_part_t;
+
+// Allocate parts on-demand to reduce .bss footprint
+static _Atomic(mi_segmap_part_t*) mi_segment_map[MI_SEGMENT_MAP_MAX_PARTS]; // = { NULL, .. }
+
+// Locate the map part covering `segment`'s address and compute the word
+// index (*idx) and bit index (*bitidx) for its bit. Returns NULL when the
+// address lies outside the tracked range, or when the part is absent and
+// `create_on_demand` is false (or its allocation fails).
+static mi_segmap_part_t* mi_segment_map_index_of(const mi_segment_t* segment, bool create_on_demand, size_t* idx, size_t* bitidx) {
+ // note: segment can be invalid or NULL.
+ mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE?
+ *idx = 0;
+ *bitidx = 0;
+ if ((uintptr_t)segment >= MI_SEGMENT_MAP_MAX_ADDRESS) return NULL;
+ const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_MAP_PART_SPAN;
+ if (segindex >= MI_SEGMENT_MAP_MAX_PARTS) return NULL;
+ mi_segmap_part_t* part = mi_atomic_load_ptr_relaxed(mi_segmap_part_t, &mi_segment_map[segindex]);
+
+ // allocate on demand to reduce .bss footprint
+ if mi_unlikely(part == NULL) {
+ if (!create_on_demand) return NULL;
+ mi_memid_t memid;
+ part = (mi_segmap_part_t*)_mi_os_zalloc(sizeof(mi_segmap_part_t), &memid);
+ if (part == NULL) return NULL;
+ part->memid = memid;
+ mi_segmap_part_t* expected = NULL;
+ if (!mi_atomic_cas_ptr_strong_release(mi_segmap_part_t, &mi_segment_map[segindex], &expected, part)) {
+ // lost the install race: free our part and use the winner's
+ _mi_os_free(part, sizeof(mi_segmap_part_t), memid);
+ part = expected;
+ if (part == NULL) return NULL;
+ }
+ }
+ mi_assert(part != NULL);
+ const uintptr_t offset = ((uintptr_t)segment) % MI_SEGMENT_MAP_PART_SPAN;
+ const uintptr_t bitofs = offset / MI_SEGMENT_MAP_PART_BIT_SPAN;
+ *idx = bitofs / MI_INTPTR_BITS;
+ *bitidx = bitofs % MI_INTPTR_BITS;
+ return part;
+}
+
+// Record a newly allocated segment: atomically set its bit in the map
+// (CAS loop). Arena-backed segments are not tracked here.
+void _mi_segment_map_allocated_at(const mi_segment_t* segment) {
+ if (segment->memid.memkind == MI_MEM_ARENA) return; // we lookup segments first in the arena's and don't need the segment map
+ size_t index;
+ size_t bitidx;
+ mi_segmap_part_t* part = mi_segment_map_index_of(segment, true /* alloc map if needed */, &index, &bitidx);
+ if (part == NULL) return; // outside our address range..
+ uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]);
+ uintptr_t newmask;
+ do {
+ newmask = (mask | ((uintptr_t)1 << bitidx));
+ } while (!mi_atomic_cas_weak_release(&part->map[index], &mask, newmask));
+}
+
+// Record a freed segment: atomically clear its bit in the map (CAS loop);
+// never allocates a missing part.
+void _mi_segment_map_freed_at(const mi_segment_t* segment) {
+ if (segment->memid.memkind == MI_MEM_ARENA) return;
+ size_t index;
+ size_t bitidx;
+ mi_segmap_part_t* part = mi_segment_map_index_of(segment, false /* don't alloc if not present */, &index, &bitidx);
+ if (part == NULL) return; // outside our address range..
+ uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]);
+ uintptr_t newmask;
+ do {
+ newmask = (mask & ~((uintptr_t)1 << bitidx));
+ } while (!mi_atomic_cas_weak_release(&part->map[index], &mask, newmask));
+}
+
+// Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
+// Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
+// Checks the map bit for p's segment-aligned base and validates the cookie.
+static mi_segment_t* _mi_segment_of(const void* p) {
+ if (p == NULL) return NULL;
+ mi_segment_t* segment = _mi_ptr_segment(p); // segment can be NULL
+ size_t index;
+ size_t bitidx;
+ mi_segmap_part_t* part = mi_segment_map_index_of(segment, false /* dont alloc if not present */, &index, &bitidx);
+ if (part == NULL) return NULL;
+ const uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]);
+ if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) {
+ bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie);
+ mi_assert_internal(cookie_ok); MI_UNUSED(cookie_ok);
+ return segment; // yes, allocated by us
+ }
+ return NULL;
+}
+
+// Is this a valid pointer in our heap?
+// Is this a valid pointer in our heap? True if `p` is inside an arena or
+// inside an OS-allocated segment recorded in the segment map.
+static bool mi_is_valid_pointer(const void* p) {
+ // first check if it is in an arena, then check if it is OS allocated
+ return (_mi_arena_contains(p) || _mi_segment_of(p) != NULL);
+}
+
+// Public API wrapper around mi_is_valid_pointer.
+mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
+ return mi_is_valid_pointer(p);
+}
+
+// Detach and free every allocated map part. "Unsafe": NOTE(review) —
+// presumably only valid when no other thread can still query the map
+// (e.g. at process shutdown); confirm with callers.
+void _mi_segment_map_unsafe_destroy(void) {
+ for (size_t i = 0; i < MI_SEGMENT_MAP_MAX_PARTS; i++) {
+ mi_segmap_part_t* part = mi_atomic_exchange_ptr_relaxed(mi_segmap_part_t, &mi_segment_map[i], NULL);
+ if (part != NULL) {
+ _mi_os_free(part, sizeof(mi_segmap_part_t), part->memid);
+ }
+ }
+}
diff --git a/compat/mimalloc/segment.c b/compat/mimalloc/segment.c
new file mode 100644
index 00000000000000..f440dc01a1db40
--- /dev/null
+++ b/compat/mimalloc/segment.c
@@ -0,0 +1,1706 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+
+#include <string.h> // memset
+#include <stdio.h>
+
+// -------------------------------------------------------------------
+// Segments
+// mimalloc pages reside in segments. See `mi_segment_valid` for invariants.
+// -------------------------------------------------------------------
+
+
+static void mi_segment_try_purge(mi_segment_t* segment, bool force);
+
+
+// -------------------------------------------------------------------
+// commit mask
+// -------------------------------------------------------------------
+
+// True when every bit set in `cm` is also set in `commit` (cm subset-of commit).
+static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false;
+ }
+ return true;
+}
+
+// True when `commit` and `cm` share at least one set bit (non-empty intersection).
+static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if ((commit->mask[i] & cm->mask[i]) != 0) return true;
+ }
+ return false;
+}
+
+// res = commit AND cm (bitwise intersection).
+static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ res->mask[i] = (commit->mask[i] & cm->mask[i]);
+ }
+}
+
+// res &= ~cm (clear in `res` every bit set in `cm`).
+static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ res->mask[i] &= ~(cm->mask[i]);
+ }
+}
+
+// res |= cm (set in `res` every bit set in `cm`).
+static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ res->mask[i] |= cm->mask[i];
+ }
+}
+
+// Build a mask with `bitcount` consecutive bits set starting at `bitidx`;
+// the full and empty cases are handled directly, otherwise the run is
+// written field by field.
+static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) {
+ mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
+ mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
+ if (bitcount == MI_COMMIT_MASK_BITS) {
+ mi_assert_internal(bitidx==0);
+ mi_commit_mask_create_full(cm);
+ }
+ else if (bitcount == 0) {
+ mi_commit_mask_create_empty(cm);
+ }
+ else {
+ mi_commit_mask_create_empty(cm);
+ size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS;
+ size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS;
+ while (bitcount > 0) {
+ mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT);
+ size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs;
+ size_t count = (bitcount > avail ? avail : bitcount);
+ size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? ~((size_t)0) : (((size_t)1 << count) - 1) << ofs);
+ cm->mask[i] = mask;
+ bitcount -= count;
+ ofs = 0;
+ i++;
+ }
+ }
+}
+
+// Committed byte count represented by `cm`: the number of set bits times
+// the bytes-per-bit for this segment (total / MI_COMMIT_MASK_BITS).
+size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) {
+ mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0);
+ size_t count = 0;
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ size_t mask = cm->mask[i];
+ if (~mask == 0) {
+ count += MI_COMMIT_MASK_FIELD_BITS;
+ }
+ else {
+ for (; mask != 0; mask >>= 1) { // todo: use popcount
+ if ((mask&1)!=0) count++;
+ }
+ }
+ }
+ // we use total since for huge segments each commit bit may represent a larger size
+ return ((total / MI_COMMIT_MASK_BITS) * count);
+}
+
+
+// Find the next run of consecutive set bits at or after *idx. On return,
+// *idx is the start of the run and the result is its length; when no set
+// bit remains, *idx becomes MI_COMMIT_MASK_BITS and the result is 0.
+size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) {
+ size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS;
+ size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS;
+ size_t mask = 0;
+ // find first ones
+ while (i < MI_COMMIT_MASK_FIELD_COUNT) {
+ mask = cm->mask[i];
+ mask >>= ofs;
+ if (mask != 0) {
+ while ((mask&1) == 0) {
+ mask >>= 1;
+ ofs++;
+ }
+ break;
+ }
+ i++;
+ ofs = 0;
+ }
+ if (i >= MI_COMMIT_MASK_FIELD_COUNT) {
+ // not found
+ *idx = MI_COMMIT_MASK_BITS;
+ return 0;
+ }
+ else {
+ // found, count ones
+ size_t count = 0;
+ *idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs;
+ do {
+ mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1);
+ do {
+ count++;
+ mask >>= 1;
+ } while ((mask&1) == 1);
+ // run may continue into the next field; reload when crossing a boundary
+ if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) {
+ i++;
+ if (i >= MI_COMMIT_MASK_FIELD_COUNT) break;
+ mask = cm->mask[i];
+ ofs = 0;
+ }
+ } while ((mask&1) == 1);
+ mi_assert_internal(count > 0);
+ return count;
+ }
+}
+
+
+/* --------------------------------------------------------------------------------
+ Segment allocation
+ We allocate pages inside bigger "segments" (32 MiB on 64-bit). This is to avoid
+ splitting VMA's on Linux and reduce fragmentation on other OS's.
+ Each thread owns its own segments.
+
+ Currently we have:
+ - small pages (64KiB)
+ - medium pages (512KiB)
+ - large pages (4MiB),
+ - huge segments have 1 page in one segment that can be larger than `MI_SEGMENT_SIZE`.
+ it is used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or with alignment `> MI_BLOCK_ALIGNMENT_MAX`.
+
+ The memory for a segment is usually committed on demand.
+ (i.e. we are careful to not touch the memory until we actually allocate a block there)
+
+ If a thread ends, it "abandons" pages that still contain live blocks.
+ Such segments are abandoned and these can be reclaimed by still running threads,
+ (much like work-stealing).
+-------------------------------------------------------------------------------- */
+
+
+/* -----------------------------------------------------------
+ Slices
+----------------------------------------------------------- */
+
+
+// One-past-the-last valid slice entry of `segment`.
+static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) {
+ return &segment->slices[segment->slice_entries];
+}
+
+// Start address of the memory covered by `slice` (segment base + slice index * slice size).
+static uint8_t* mi_slice_start(const mi_slice_t* slice) {
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment));
+ return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE));
+}
+
+
+/* -----------------------------------------------------------
+ Bins
+----------------------------------------------------------- */
+// Use bit scan forward to quickly find the first zero bit if it is available
+
+// Map a span length to a bin: exact bins for small counts, then a
+// pseudo-logarithmic scheme based on the highest set bit (mi_bsr).
+static inline size_t mi_slice_bin8(size_t slice_count) {
+ if (slice_count<=1) return slice_count;
+ mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT);
+ slice_count--;
+ size_t s = mi_bsr(slice_count); // slice_count > 1
+ if (s <= 2) return slice_count + 1;
+ size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4;
+ return bin;
+}
+
+// Checked wrapper around mi_slice_bin8: result is always <= MI_SEGMENT_BIN_MAX.
+static inline size_t mi_slice_bin(size_t slice_count) {
+ mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE);
+ mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX);
+ size_t bin = mi_slice_bin8(slice_count);
+ mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX);
+ return bin;
+}
+
+// Index of `slice` within its segment's slice array.
+static inline size_t mi_slice_index(const mi_slice_t* slice) {
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ ptrdiff_t index = slice - segment->slices;
+ mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries);
+ return index;
+}
+
+
+/* -----------------------------------------------------------
+ Slice span queues
+----------------------------------------------------------- */
+
+// Push a free span at the front of queue `sq` and mark it free (block_size==0).
+static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) {
+ // todo: or push to the end?
+ mi_assert_internal(slice->prev == NULL && slice->next==NULL);
+ slice->prev = NULL; // paranoia
+ slice->next = sq->first;
+ sq->first = slice;
+ if (slice->next != NULL) slice->next->prev = slice;
+ else sq->last = slice;
+ slice->block_size = 0; // free
+}
+
+// Select the thread-local span queue whose bin covers `slice_count` slices.
+static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
+ size_t bin = mi_slice_bin(slice_count);
+ mi_span_queue_t* sq = &tld->spans[bin];
+ mi_assert_internal(sq->slice_count >= slice_count);
+ return sq;
+}
+
+// Unlink `slice` from queue `sq` (tolerant of slices not actually in the
+// queue, which can happen during reclaim) and mark it no longer free.
+static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
+ mi_assert_internal(slice->block_size==0 && slice->slice_count>0 && slice->slice_offset==0);
+ // should work too if the queue does not contain slice (which can happen during reclaim)
+ if (slice->prev != NULL) slice->prev->next = slice->next;
+ if (slice == sq->first) sq->first = slice->next;
+ if (slice->next != NULL) slice->next->prev = slice->prev;
+ if (slice == sq->last) sq->last = slice->prev;
+ slice->prev = NULL;
+ slice->next = NULL;
+ slice->block_size = 1; // no more free
+}
+
+
+/* -----------------------------------------------------------
+ Invariant checking
+----------------------------------------------------------- */
+
+// A slice with a non-zero block_size holds a page in use; zero means free.
+static bool mi_slice_is_used(const mi_slice_t* slice) {
+ return (slice->block_size > 0);
+}
+
+
+#if (MI_DEBUG>=3)
+// Debug helper: linear membership test for `slice` in span queue `sq`.
+static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) {
+ for (mi_slice_t* s = sq->first; s != NULL; s = s->next) {
+ if (s==slice) return true;
+ }
+ return false;
+}
+
+// Debug-only invariant check: walk every slice span of `segment`, verifying
+// back-offsets, span-queue membership for free spans, and that the purge
+// mask only covers committed memory.
+static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
+ mi_assert_internal(segment != NULL);
+ mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
+ mi_assert_internal(segment->abandoned <= segment->used);
+ mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id());
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // can only decommit committed blocks
+ //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0);
+ mi_slice_t* slice = &segment->slices[0];
+ const mi_slice_t* end = mi_segment_slices_end(segment);
+ size_t used_count = 0;
+ mi_span_queue_t* sq;
+ while(slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ size_t index = mi_slice_index(slice);
+ size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
+ if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET_COUNT valid back offsets
+ used_count++;
+ mi_assert_internal(slice->is_huge == (segment->kind == MI_SEGMENT_HUGE));
+ for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET_COUNT && index + i <= maxindex; i++) {
+ mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
+ mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
+ mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1);
+ }
+ // and the last entry as well (for coalescing)
+ const mi_slice_t* last = slice + slice->slice_count - 1;
+ if (last > slice && last < mi_segment_slices_end(segment)) {
+ mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
+ mi_assert_internal(last->slice_count == 0);
+ mi_assert_internal(last->block_size == 1);
+ }
+ }
+ else { // free range of slices; only last slice needs a valid back offset
+ mi_slice_t* last = &segment->slices[maxindex];
+ if (segment->kind != MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) {
+ mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset);
+ }
+ mi_assert_internal(slice == last || last->slice_count == 0 );
+ mi_assert_internal(last->block_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->block_size==1));
+ if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned
+ sq = mi_span_queue_for(slice->slice_count,tld);
+ mi_assert_internal(mi_span_queue_contains(sq,slice));
+ }
+ }
+ slice = &segment->slices[maxindex+1];
+ }
+ mi_assert_internal(slice == end);
+ // NOTE(review): the `+ 1` — presumably the segment-info span counts as
+ // one extra used slice here; confirm against mi_segment_init.
+ mi_assert_internal(used_count == segment->used + 1);
+ return true;
+}
+#endif
+
+/* -----------------------------------------------------------
+ Segment size calculations
+----------------------------------------------------------- */
+
+// Byte size of the segment-info (metadata) area at the start of the segment.
+static size_t mi_segment_info_size(mi_segment_t* segment) {
+ return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE;
+}
+
+// Compute the usable start address of the page beginning at `slice`,
+// applying a block_size-dependent offset to avoid OS page/cache alignment
+// effects; optionally returns the remaining page size via `page_size`.
+static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t block_size, size_t* page_size)
+{
+ const ptrdiff_t idx = slice - segment->slices;
+ const size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE;
+ uint8_t* const pstart = (uint8_t*)segment + (idx*MI_SEGMENT_SLICE_SIZE);
+ // make the start not OS page aligned for smaller blocks to avoid page/cache effects
+ // note: the offset must always be a block_size multiple since we assume small allocations
+ // are aligned (see `mi_heap_malloc_aligned`).
+ size_t start_offset = 0;
+ if (block_size > 0 && block_size <= MI_MAX_ALIGN_GUARANTEE) {
+ // for small objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
+ const size_t adjust = block_size - ((uintptr_t)pstart % block_size);
+ if (adjust < block_size && psize >= block_size + adjust) {
+ start_offset += adjust;
+ }
+ }
+ if (block_size >= MI_INTPTR_SIZE) {
+ if (block_size <= 64) { start_offset += 3*block_size; }
+ else if (block_size <= 512) { start_offset += block_size; }
+ }
+ start_offset = _mi_align_up(start_offset, MI_MAX_ALIGN_SIZE);
+ mi_assert_internal(_mi_is_aligned(pstart + start_offset, MI_MAX_ALIGN_SIZE));
+ mi_assert_internal(block_size == 0 || block_size > MI_MAX_ALIGN_GUARANTEE || _mi_is_aligned(pstart + start_offset,block_size));
+ if (page_size != NULL) { *page_size = psize - start_offset; }
+ return (pstart + start_offset);
+}
+
+// Start of the page available memory; can be used on uninitialized pages
+// Start of the page available memory; can be used on uninitialized pages.
+// Resolves `page` to its slice and delegates to
+// `_mi_segment_page_start_from_slice` using the page's block size.
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
+{
+ const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
+ uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, mi_page_block_size(page), page_size);
+ mi_assert_internal(mi_page_block_size(page) > 0 || _mi_ptr_page(p) == page);
+ mi_assert_internal(_mi_ptr_segment(p) == segment);
+ return p;
+}
+
+
+// Number of slices a segment needs for `required` bytes (0 = default-sized
+// segment); optionally also returns the number of info slices. In secure
+// mode, room for guard pages is added around the info area and the end.
+static size_t mi_segment_calculate_slices(size_t required, size_t* info_slices) {
+ size_t page_size = _mi_os_page_size();
+ size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size);
+ size_t guardsize = 0;
+
+ if (MI_SECURE>0) {
+ // in secure mode, we set up a protected page in between the segment info
+ // and the page data (and one at the end of the segment)
+ guardsize = page_size;
+ if (required > 0) {
+ required = _mi_align_up(required, MI_SEGMENT_SLICE_SIZE) + page_size;
+ }
+ }
+
+ isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE);
+ if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE;
+ size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) );
+ mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0);
+ return (segment_size / MI_SEGMENT_SLICE_SIZE);
+}
+
+
+/* ----------------------------------------------------------------------------
+Segment caches
+We keep a small segment cache per thread to increase local
+reuse and avoid setting/clearing guard pages in secure mode.
+------------------------------------------------------------------------------- */
+
+// Update segment statistics and the thread-local count/size (and peaks);
+// a negative `segment_size` records a segment removal.
+static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
+ if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
+ else _mi_stat_decrease(&tld->stats->segments,1);
+ tld->count += (segment_size >= 0 ? 1 : -1);
+ if (tld->count > tld->peak_count) tld->peak_count = tld->count;
+ tld->current_size += segment_size;
+ if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
+}
+
+// Return a segment's memory: clear its map bit, update statistics and the
+// reclaim count, unprotect guard pages in secure mode, then hand the memory
+// back via _mi_arena_free with the still-committed size.
+static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
+ segment->thread_id = 0;
+ _mi_segment_map_freed_at(segment);
+ mi_segments_track_size(-((long)mi_segment_size(segment)),tld);
+ if (segment->was_reclaimed) {
+ tld->reclaim_count--;
+ segment->was_reclaimed = false;
+ }
+ if (MI_SECURE>0) {
+ // _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set
+ // unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted
+ size_t os_pagesize = _mi_os_page_size();
+ _mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
+ uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
+ _mi_os_unprotect(end, os_pagesize);
+ }
+
+ // purge delayed decommits now? (no, leave it to the arena)
+ // mi_segment_try_purge(segment,true,tld->stats);
+
+ const size_t size = mi_segment_size(segment);
+ const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
+
+ _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid);
+}
+
+/* -----------------------------------------------------------
+ Commit/Decommit ranges
+----------------------------------------------------------- */
+
+// Compute the commit mask covering [p, p+size) within `segment`.
+// `conservative` selects the rounding direction: inward (for decommit/purge,
+// so memory outside the requested range is never touched) or outward (for
+// commit, so the whole requested range is covered). Outputs the aligned start
+// pointer (`start_p`), the aligned total size (`full_size`), and the
+// corresponding bitmask `cm` over MI_COMMIT_SIZE units.
+// Huge segments and empty/oversized ranges produce an empty mask.
+static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) {
+ mi_assert_internal(_mi_ptr_segment(p + 1) == segment);
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
+ mi_commit_mask_create_empty(cm);
+ if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return;
+ const size_t segstart = mi_segment_info_size(segment);
+ const size_t segsize = mi_segment_size(segment);
+ if (p >= (uint8_t*)segment + segsize) return;
+
+ size_t pstart = (p - (uint8_t*)segment); // offset of `p` within the segment
+ mi_assert_internal(pstart + size <= segsize);
+
+ size_t start;
+ size_t end;
+ if (conservative) {
+ // decommit conservative: round the range inward
+ start = _mi_align_up(pstart, MI_COMMIT_SIZE);
+ end = _mi_align_down(pstart + size, MI_COMMIT_SIZE);
+ mi_assert_internal(start >= segstart);
+ mi_assert_internal(end <= segsize);
+ }
+ else {
+ // commit liberal: round the range outward
+ start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE);
+ end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE);
+ }
+ if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area
+ start = segstart;
+ }
+ if (end > segsize) {
+ end = segsize;
+ }
+
+ mi_assert_internal(start <= pstart && (pstart + size) <= end);
+ mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0);
+ *start_p = (uint8_t*)segment + start;
+ *full_size = (end > start ? end - start : 0);
+ if (*full_size == 0) return;
+
+ size_t bitidx = start / MI_COMMIT_SIZE;
+ mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
+
+ size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0
+ if (bitidx + bitcount > MI_COMMIT_MASK_BITS) {
+ _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size);
+ }
+ mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
+ mi_commit_mask_create(bitidx, bitcount, cm);
+}
+
+// Commit the OS memory covering [p, p+size) in `segment` (liberal rounding).
+// Updates the segment's commit mask and clears any pending purges in the
+// range. Returns false only if the OS commit itself failed.
+static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size) {
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
+
+ // commit liberal
+ uint8_t* start = NULL;
+ size_t full_size = 0;
+ mi_commit_mask_t mask;
+ mi_segment_commit_mask(segment, false /* conservative? */, p, size, &start, &full_size, &mask);
+ if (mi_commit_mask_is_empty(&mask) || full_size == 0) return true;
+
+ if (!mi_commit_mask_all_set(&segment->commit_mask, &mask)) {
+ // committing: some part of the range is not yet committed
+ bool is_zero = false;
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
+ _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
+ if (!_mi_os_commit(start, full_size, &is_zero)) return false;
+ mi_commit_mask_set(&segment->commit_mask, &mask);
+ }
+
+ // increase purge expiration when using part of delayed purges -- we assume more allocations are coming soon.
+ if (mi_commit_mask_any_set(&segment->purge_mask, &mask)) {
+ segment->purge_expire = _mi_clock_now() + mi_option_get(mi_option_purge_delay);
+ }
+
+ // always clear any delayed purges in our range (as they are committed now)
+ mi_commit_mask_clear(&segment->purge_mask, &mask);
+ return true;
+}
+
+// Ensure [p, p+size) is committed; a no-op fast path when the segment is
+// fully committed with no purges pending, otherwise delegates to
+// `mi_segment_commit`.
+static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size) {
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
+ // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow
+ const bool fully_committed = (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask));
+ if (fully_committed) return true;
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
+ return mi_segment_commit(segment, p, size);
+}
+
+// Purge (reset or decommit) the committed parts of [p, p+size), using
+// conservative rounding so memory outside the range is never affected.
+// Clears the purged bits from the commit mask only on real decommit.
+static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size) {
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
+ if (!segment->allow_purge) return true;
+
+ // purge conservative
+ uint8_t* start = NULL;
+ size_t full_size = 0;
+ mi_commit_mask_t mask;
+ mi_segment_commit_mask(segment, true /* conservative? */, p, size, &start, &full_size, &mask);
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return true;
+
+ if (mi_commit_mask_any_set(&segment->commit_mask, &mask)) {
+ // purging
+ mi_assert_internal((void*)start != (void*)segment);
+ mi_assert_internal(segment->allow_decommit);
+ const bool decommitted = _mi_os_purge(start, full_size); // reset or decommit
+ if (decommitted) {
+ // the memory is no longer committed: update commit mask and stats
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
+ _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for double counting
+ mi_commit_mask_clear(&segment->commit_mask, &mask);
+ }
+ }
+
+ // always clear any scheduled purges in our range
+ mi_commit_mask_clear(&segment->purge_mask, &mask);
+ return true;
+}
+
+// Schedule [p, p+size) for purging. With a zero purge delay the purge is
+// done immediately; otherwise the range is recorded in the purge mask and
+// the expiration time is initialized or extended.
+static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size) {
+ if (!segment->allow_purge) return;
+
+ if (mi_option_get(mi_option_purge_delay) == 0) {
+ mi_segment_purge(segment, p, size);
+ }
+ else {
+ // register for future purge in the purge mask
+ uint8_t* start = NULL;
+ size_t full_size = 0;
+ mi_commit_mask_t mask;
+ mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask);
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return;
+
+ // update the delayed purge mask and its expiration
+ mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask));
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only purge what is committed; span_free may try to decommit more
+ mi_commit_mask_set(&segment->purge_mask, &cmask);
+ mi_msecs_t now = _mi_clock_now();
+ if (segment->purge_expire == 0) {
+ // no previous purges, initialize now
+ segment->purge_expire = now + mi_option_get(mi_option_purge_delay);
+ }
+ else if (segment->purge_expire <= now) {
+ // previous purge mask already expired
+ if (segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) {
+ mi_segment_try_purge(segment, true);
+ }
+ else {
+ segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of free's
+ }
+ }
+ else {
+ // previous purge mask is not yet expired, increase the expiration by a bit.
+ segment->purge_expire += mi_option_get(mi_option_purge_extend_delay);
+ }
+ }
+}
+
+// Execute the pending purges of a segment if expired (or when `force` is
+// set). The purge mask and expiration are cleared up-front and a local copy
+// of the mask is iterated, purging each contiguous run of commit blocks.
+static void mi_segment_try_purge(mi_segment_t* segment, bool force) {
+ if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return;
+ mi_msecs_t now = _mi_clock_now();
+ if (!force && now < segment->purge_expire) return;
+
+ // take a local copy and reset the segment's pending state first
+ mi_commit_mask_t mask = segment->purge_mask;
+ segment->purge_expire = 0;
+ mi_commit_mask_create_empty(&segment->purge_mask);
+
+ size_t idx;
+ size_t count;
+ mi_commit_mask_foreach(&mask, idx, count) {
+ // if found, decommit that sequence
+ if (count > 0) {
+ uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE);
+ size_t size = count * MI_COMMIT_SIZE;
+ mi_segment_purge(segment, p, size);
+ }
+ }
+ mi_commit_mask_foreach_end()
+ mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
+}
+
+// called from `mi_heap_collect_ex`
+// this can be called per-page so it is important that try_purge has fast exit path
+// (it returns immediately when nothing is scheduled or not yet expired)
+void _mi_segment_collect(mi_segment_t* segment, bool force) {
+ mi_segment_try_purge(segment, force);
+}
+
+/* -----------------------------------------------------------
+ Span free
+----------------------------------------------------------- */
+
+// A segment is abandoned when it has no owning thread (thread_id == 0).
+static bool mi_segment_is_abandoned(mi_segment_t* segment) {
+ const uintptr_t tid = mi_atomic_load_relaxed(&segment->thread_id);
+ return (tid == 0);
+}
+
+// note: can be called on abandoned segments
+// Mark the span [slice_index, slice_index+slice_count) as free: set up the
+// first (and last) slice metadata, optionally schedule a purge of the span's
+// memory, and push it on the matching free span queue (huge/abandoned
+// segments keep no queues).
+static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, bool allow_purge, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice_index < segment->slice_entries);
+ mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment)
+ ? NULL : mi_span_queue_for(slice_count,tld));
+ if (slice_count==0) slice_count = 1;
+ mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries);
+
+ // set first and last slice (the intermediates can be undetermined)
+ mi_slice_t* slice = &segment->slices[slice_index];
+ slice->slice_count = (uint32_t)slice_count;
+ mi_assert_internal(slice->slice_count == slice_count); // no overflow?
+ slice->slice_offset = 0;
+ if (slice_count > 1) {
+ mi_slice_t* last = slice + slice_count - 1;
+ mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment);
+ if (last > end) { last = end; } // clamp for spans running past the slice table
+ last->slice_count = 0;
+ last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
+ last->block_size = 0;
+ }
+
+ // perhaps decommit
+ if (allow_purge) {
+ mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE);
+ }
+
+ // and push it on the free page queue (if it was not a huge page)
+ if (sq != NULL) mi_span_queue_push( sq, slice );
+ else slice->block_size = 0; // mark huge page as free anyways
+}
+
+/*
+// called from reclaim to add existing free spans
+static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) {
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0);
+ size_t slice_index = mi_slice_index(slice);
+ mi_segment_span_free(segment,slice_index,slice->slice_count,tld);
+}
+*/
+
+// Unlink a free span from the size-binned span queue it currently lives in.
+static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->block_size==0);
+ mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE);
+ mi_span_queue_delete(mi_span_queue_for(slice->slice_count, tld), slice);
+}
+
+// note: can be called on abandoned segments
+// Free a span and coalesce it with adjacent free spans (removing those from
+// their queues first), then re-add the merged span as free. Returns the
+// (possibly moved) first slice of the merged span.
+static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0);
+ mi_segment_t* const segment = _mi_ptr_segment(slice);
+
+ // for huge pages, just mark as free but don't add to the queues
+ if (segment->kind == MI_SEGMENT_HUGE) {
+ // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case)
+ mi_assert_internal((segment->used==0 && slice->block_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
+ slice->block_size = 0; // mark as free anyways
+ // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to
+ // avoid a possible cache miss (and the segment is about to be freed)
+ return slice;
+ }
+
+ // otherwise coalesce the span and add to the free span queues
+ const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment);
+ size_t slice_count = slice->slice_count;
+ mi_slice_t* next = slice + slice->slice_count;
+ mi_assert_internal(next <= mi_segment_slices_end(segment));
+ if (next < mi_segment_slices_end(segment) && next->block_size==0) {
+ // free next block -- remove it from free and merge
+ mi_assert_internal(next->slice_count > 0 && next->slice_offset==0);
+ slice_count += next->slice_count; // extend
+ if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); }
+ }
+ if (slice > segment->slices) {
+ mi_slice_t* prev = mi_slice_first(slice - 1);
+ mi_assert_internal(prev >= segment->slices);
+ if (prev->block_size==0) {
+ // free previous slice -- remove it from free and merge
+ mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0);
+ slice_count += prev->slice_count;
+ slice->slice_count = 0;
+ slice->slice_offset = (uint32_t)((uint8_t*)slice - (uint8_t*)prev); // set the slice offset for `segment_force_abandon` (in case the previous free block is very large).
+ if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); }
+ slice = prev;
+ }
+ }
+
+ // and add the new free page
+ mi_segment_span_free(segment, mi_slice_index(slice), slice_count, true, tld);
+ return slice;
+}
+
+
+
+/* -----------------------------------------------------------
+ Page allocation
+----------------------------------------------------------- */
+
+// Note: may still return NULL if committing the memory failed
+// Turn the span starting at `slice_index` into a single allocated page:
+// commit its memory, initialize the first slice as the page, and set the
+// back-pointer slices so interior pointers can find the page start.
+static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count) {
+ mi_assert_internal(slice_index < segment->slice_entries);
+ mi_slice_t* const slice = &segment->slices[slice_index];
+ mi_assert_internal(slice->block_size==0 || slice->block_size==1);
+
+ // commit before changing the slice data
+ if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE)) {
+ return NULL; // commit failed!
+ }
+
+ // convert the slices to a page
+ slice->slice_offset = 0;
+ slice->slice_count = (uint32_t)slice_count;
+ mi_assert_internal(slice->slice_count == slice_count);
+ const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE;
+ slice->block_size = bsize;
+ mi_page_t* page = mi_slice_to_page(slice);
+ mi_assert_internal(mi_page_block_size(page) == bsize);
+
+ // set slice back pointers for the first MI_MAX_SLICE_OFFSET_COUNT entries
+ size_t extra = slice_count-1;
+ if (extra > MI_MAX_SLICE_OFFSET_COUNT) extra = MI_MAX_SLICE_OFFSET_COUNT;
+ if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than available entries in the segment->slices
+
+ mi_slice_t* slice_next = slice + 1;
+ for (size_t i = 1; i <= extra; i++, slice_next++) {
+ slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i);
+ slice_next->slice_count = 0;
+ slice_next->block_size = 1; // non-zero marks the slice as used
+ }
+
+ // and also for the last one (if not set already) (the last one is needed for coalescing and for large alignments)
+ // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543)
+ mi_slice_t* last = slice + slice_count - 1;
+ mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment);
+ if (last > end) last = end;
+ if (last > slice) {
+ last->slice_offset = (uint32_t)(sizeof(mi_slice_t) * (last - slice));
+ last->slice_count = 0;
+ last->block_size = 1;
+ }
+
+ // and initialize the page
+ page->is_committed = true;
+ page->is_zero_init = segment->free_is_zero;
+ page->is_huge = (segment->kind == MI_SEGMENT_HUGE);
+ segment->used++;
+ return page;
+}
+
+// Split a span so that `slice` keeps exactly `slice_count` slices; the
+// remainder is returned to the free span queues (without purging, since it
+// is a left-over of an allocation in progress).
+static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) {
+ mi_assert_internal(_mi_ptr_segment(slice) == segment);
+ mi_assert_internal(slice->slice_count >= slice_count);
+ mi_assert_internal(slice->block_size > 0); // no more in free queue
+ if (slice->slice_count <= slice_count) return;
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
+ const size_t rest_index = mi_slice_index(slice) + slice_count;
+ const size_t rest_count = slice->slice_count - slice_count;
+ mi_segment_span_free(segment, rest_index, rest_count, false /* don't purge left-over part */, tld);
+ slice->slice_count = (uint32_t)slice_count;
+}
+
+// Find a free span of at least `slice_count` slices in the thread's span
+// queues (best-fit upward), split off any excess, and allocate it as a page.
+// Returns NULL when no suitable span exists or committing its memory failed.
+static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX);
+ // search from best fit up
+ mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld);
+ if (slice_count == 0) slice_count = 1;
+ while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) {
+ for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) {
+ if (slice->slice_count >= slice_count) {
+ // found one
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ if (_mi_arena_memid_is_suitable(segment->memid, req_arena_id)) {
+ // found a suitable page span
+ mi_span_queue_delete(sq, slice);
+
+ if (slice->slice_count > slice_count) {
+ mi_segment_slice_split(segment, slice, slice_count, tld);
+ }
+ mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0);
+ mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count);
+ if (page == NULL) {
+ // commit failed; return NULL but first restore the slice
+ mi_segment_span_free_coalesce(slice, tld);
+ return NULL;
+ }
+ return page;
+ }
+ }
+ }
+ sq++; // try a larger size-class queue
+ }
+ // could not find a page..
+ return NULL;
+}
+
+
+/* -----------------------------------------------------------
+ Segment allocation
+----------------------------------------------------------- */
+
+// Allocate the raw memory for a segment from an arena (or the OS), ensure
+// at least the metadata (info slices) is committed, and initialize the
+// commit/purge bookkeeping fields. For over-aligned huge pages the slice
+// counts are recalculated to account for the alignment offset.
+// Returns NULL on allocation or commit failure.
+static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id,
+ size_t* psegment_slices, size_t* pinfo_slices,
+ bool commit, mi_segments_tld_t* tld)
+
+{
+ mi_memid_t memid;
+ bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
+ size_t align_offset = 0;
+ size_t alignment = MI_SEGMENT_ALIGN;
+
+ if (page_alignment > 0) {
+ // mi_assert_internal(huge_page != NULL);
+ mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN);
+ alignment = page_alignment;
+ const size_t info_size = (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE;
+ align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN );
+ const size_t extra = align_offset - info_size;
+ // recalculate due to potential guard pages
+ *psegment_slices = mi_segment_calculate_slices(required + extra, pinfo_slices);
+ mi_assert_internal(*psegment_slices > 0 && *psegment_slices <= UINT32_MAX);
+ }
+
+ const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
+ mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid);
+ if (segment == NULL) {
+ return NULL; // failed to allocate
+ }
+
+ // ensure metadata part of the segment is committed
+ mi_commit_mask_t commit_mask;
+ if (memid.initially_committed) {
+ mi_commit_mask_create_full(&commit_mask);
+ }
+ else {
+ // at least commit the info slices
+ const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
+ mi_assert_internal(commit_needed>0);
+ mi_commit_mask_create(0, commit_needed, &commit_mask);
+ mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE);
+ if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL)) {
+ _mi_arena_free(segment,segment_size,0,memid); // release the memory on commit failure
+ return NULL;
+ }
+ }
+ mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
+
+ segment->memid = memid;
+ segment->allow_decommit = !memid.is_pinned;
+ segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0);
+ segment->segment_size = segment_size;
+ segment->subproc = tld->subproc;
+ segment->commit_mask = commit_mask;
+ segment->purge_expire = 0;
+ segment->free_is_zero = memid.initially_zero;
+ mi_commit_mask_create_empty(&segment->purge_mask);
+
+ mi_segments_track_size((long)(segment_size), tld);
+ _mi_segment_map_allocated_at(segment); // register in the global segment map
+ return segment;
+}
+
+
+// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
+// `required == 0` allocates a normal segment; `required > 0` allocates a
+// huge segment and returns the single huge page through `huge_page`.
+// Initializes the segment metadata, secure-mode guard pages, the internal
+// info page, and the initial free span.
+static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_page_t** huge_page)
+{
+ mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
+
+ // calculate needed sizes first
+ size_t info_slices;
+ size_t segment_slices = mi_segment_calculate_slices(required, &info_slices);
+ mi_assert_internal(segment_slices > 0 && segment_slices <= UINT32_MAX);
+
+ // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little)
+ const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems
+ _mi_current_thread_count() > 1 && // do not delay for the first N threads
+ tld->peak_count < (size_t)mi_option_get(mi_option_eager_commit_delay));
+ const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
+ bool commit = eager || (required > 0); // huge segments are always fully committed
+
+ // Allocate the segment from the OS
+ mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id,
+ &segment_slices, &info_slices, commit, tld);
+ if (segment == NULL) return NULL;
+
+ // zero the segment info? -- not always needed as it may be zero initialized from the OS
+ if (!segment->memid.initially_zero) {
+ ptrdiff_t ofs = offsetof(mi_segment_t, next); // keep fields set by `mi_segment_os_alloc` intact
+ size_t prefix = offsetof(mi_segment_t, slices) - ofs;
+ size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
+ _mi_memzero((uint8_t*)segment + ofs, zsize);
+ }
+
+ // initialize the rest of the segment info
+ const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
+ segment->segment_slices = segment_slices;
+ segment->segment_info_slices = info_slices;
+ segment->thread_id = _mi_thread_id();
+ segment->cookie = _mi_ptr_cookie(segment);
+ segment->slice_entries = slice_entries;
+ segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE);
+
+ // _mi_memzero(segment->slices, sizeof(mi_slice_t)*(info_slices+1));
+ _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment));
+
+ // set up guard pages
+ size_t guard_slices = 0;
+ if (MI_SECURE>0) {
+ // in secure mode, we set up a protected page in between the segment info
+ // and the page data, and at the end of the segment.
+ size_t os_pagesize = _mi_os_page_size();
+ _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
+ uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
+ mi_segment_ensure_committed(segment, end, os_pagesize); // must be committed before it can be protected
+ _mi_os_protect(end, os_pagesize);
+ if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-(
+ guard_slices = 1;
+ }
+
+ // reserve first slices for segment info
+ mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices);
+ mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance
+ mi_assert_internal(segment->used == 1);
+ segment->used = 0; // don't count our internal slices towards usage
+
+ // initialize initial free pages
+ if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page
+ mi_assert_internal(huge_page==NULL);
+ mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't purge */, tld);
+ }
+ else {
+ mi_assert_internal(huge_page!=NULL);
+ mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
+ mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask));
+ *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices);
+ mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance
+ }
+
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
+ return segment;
+}
+
+
+// Free an entirely unused segment: unlink its free spans from the span
+// queues and return the memory to the arena/OS. Precondition: used == 0.
+static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
+ MI_UNUSED(force);
+ mi_assert_internal(segment != NULL);
+ mi_assert_internal(segment->next == NULL);
+ mi_assert_internal(segment->used == 0);
+
+ // in `mi_segment_force_abandon` we set this to true to ensure the segment's memory stays valid
+ if (segment->dont_free) return;
+
+ // Remove the free pages
+ mi_slice_t* slice = &segment->slices[0];
+ const mi_slice_t* end = mi_segment_slices_end(segment);
+ #if MI_DEBUG>1
+ size_t page_count = 0;
+ #endif
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ mi_assert_internal(mi_slice_index(slice)==0 || slice->block_size == 0); // no more used pages ..
+ if (slice->block_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
+ mi_segment_span_remove_from_queue(slice, tld);
+ }
+ #if MI_DEBUG>1
+ page_count++;
+ #endif
+ slice = slice + slice->slice_count; // jump to the next span
+ }
+ mi_assert_internal(page_count == 2); // first page is allocated by the segment itself
+
+ // stats
+ // _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment));
+
+ // return it to the OS
+ mi_segment_os_free(segment, tld);
+}
+
+
+/* -----------------------------------------------------------
+ Page Free
+----------------------------------------------------------- */
+
+static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);
+
+// note: can be called on abandoned pages
+// Clear a fully-free page: update stats, optionally reset its memory, zero
+// its page struct (preserving the segment-managed fields and heap tag), and
+// return its span to the free lists via coalescing. Returns the (possibly
+// coalesced) slice of the freed span.
+static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) {
+ mi_assert_internal(page->block_size > 0);
+ mi_assert_internal(mi_page_all_free(page));
+ mi_segment_t* segment = _mi_ptr_segment(page);
+ mi_assert_internal(segment->used > 0);
+
+ size_t inuse = page->capacity * mi_page_block_size(page);
+ _mi_stat_decrease(&tld->stats->page_committed, inuse);
+ _mi_stat_decrease(&tld->stats->pages, 1);
+ _mi_stat_decrease(&tld->stats->page_bins[_mi_page_stats_bin(page)], 1);
+
+ // reset the page memory to reduce memory pressure?
+ if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) {
+ size_t psize;
+ uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+ _mi_os_reset(start, psize);
+ }
+
+ // zero the page data, but not the segment fields and heap tag
+ page->is_zero_init = false;
+ uint8_t heap_tag = page->heap_tag;
+ ptrdiff_t ofs = offsetof(mi_page_t, capacity); // fields before `capacity` are kept
+ _mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
+ page->block_size = 1; // non-zero marks the slice as used until freed below
+ page->heap_tag = heap_tag;
+
+ // and free it
+ mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld);
+ segment->used--;
+ segment->free_is_zero = false; // page contents are no longer guaranteed zero
+
+ // cannot assert segment valid as it is called during reclaim
+ // mi_assert_expensive(mi_segment_is_valid(segment, tld));
+ return slice;
+}
+
+// Free a page in a segment; afterwards free the whole segment when it has
+// no used pages left, abandon it when only abandoned pages remain, or run
+// any delayed purges otherwise.
+void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
+{
+ mi_assert(page != NULL);
+ mi_segment_t* segment = _mi_page_segment(page);
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
+
+ // mark it as free now
+ mi_segment_page_clear(page, tld);
+ mi_assert_expensive(mi_segment_is_valid(segment, tld));
+
+ if (segment->used == 0) {
+ // no more used pages; remove from the free list and free the segment
+ mi_segment_free(segment, force, tld);
+ }
+ else if (segment->used == segment->abandoned) {
+ // only abandoned pages; remove from free list and abandon
+ mi_segment_abandon(segment,tld);
+ }
+ else {
+ // perform delayed purges
+ mi_segment_try_purge(segment, false /* force? */);
+ }
+}
+
+
+/* -----------------------------------------------------------
+Abandonment
+
+When threads terminate, they can leave segments with
+live blocks (reachable through other threads). Such segments
+are "abandoned" and will be reclaimed by other threads to
+reuse their pages and/or free them eventually. The
+`thread_id` of such segments is 0.
+
+When a block is freed in an abandoned segment, the segment
+is reclaimed into that thread.
+
+Moreover, if threads are looking for a fresh segment, they
+will first consider abandoned segments -- these can be found
+by scanning the arena memory
+(segments outside arena memory are only reclaimed by a free).
+----------------------------------------------------------- */
+
+/* -----------------------------------------------------------
+ Abandon segment/page
+----------------------------------------------------------- */
+
+// Abandon a segment whose used pages are all abandoned: remove its free
+// spans from the thread-local queues, purge as needed, clear ownership
+// (thread_id = 0), and mark it abandoned in the arena so other threads can
+// reclaim it.
+static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
+ mi_assert_internal(segment->used == segment->abandoned);
+ mi_assert_internal(segment->used > 0);
+ mi_assert_internal(segment->abandoned_visits == 0);
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
+
+ // remove the free pages from the free page queues
+ mi_slice_t* slice = &segment->slices[0];
+ const mi_slice_t* end = mi_segment_slices_end(segment);
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ if (slice->block_size == 0) { // a free page
+ mi_segment_span_remove_from_queue(slice,tld);
+ slice->block_size = 0; // but keep it free
+ }
+ slice = slice + slice->slice_count;
+ }
+
+ // perform delayed decommits (forcing is much slower on mstress)
+ // Only abandoned segments in arena memory can be reclaimed without a free
+ // so if a segment is not from an arena we force purge here to be conservative.
+ const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge);
+ mi_segment_try_purge(segment, force_purge);
+
+ // all pages in the segment are abandoned; add it to the abandoned list
+ _mi_stat_increase(&tld->stats->segments_abandoned, 1);
+ mi_segments_track_size(-((long)mi_segment_size(segment)), tld);
+ segment->thread_id = 0;
+ segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
+ if (segment->was_reclaimed) {
+ tld->reclaim_count--;
+ segment->was_reclaimed = false;
+ }
+ _mi_arena_segment_mark_abandoned(segment);
+}
+
+// Abandon a single page; once every used page of the segment is abandoned,
+// the whole segment is abandoned as well.
+void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
+ mi_assert(page != NULL);
+ mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
+ mi_assert_internal(mi_page_heap(page) == NULL);
+ mi_segment_t* const segment = _mi_page_segment(page);
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
+
+ segment->abandoned++;
+ _mi_stat_increase(&tld->stats->pages_abandoned, 1);
+ mi_assert_internal(segment->abandoned <= segment->used);
+
+ const bool all_pages_abandoned = (segment->used == segment->abandoned);
+ if (all_pages_abandoned) {
+ // all pages are abandoned, abandon the entire segment
+ mi_segment_abandon(segment, tld);
+ }
+}
+
+/* -----------------------------------------------------------
+ Reclaim abandoned pages
+----------------------------------------------------------- */
+
+// Begin iterating a segment's spans: set `*end` to one-past-the-last slice
+// and return the first span after the segment's own metadata page.
+static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) {
+ *end = mi_segment_slices_end(segment);
+ mi_slice_t* first = &segment->slices[0];
+ mi_assert_internal(first->slice_count>0 && first->block_size>0); // segment allocated page
+ // the first span holds the segment info itself; skip past it
+ return (first + first->slice_count);
+}
+
+// Possibly free pages and check if free space is available
+// Walks all spans of an abandoned segment: collects concurrent frees,
+// clears pages that became fully free, and returns true if a span of
+// `slices_needed` slices or a page of `block_size` with free blocks exists.
+static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld)
+{
+ mi_assert_internal(mi_segment_is_abandoned(segment));
+ bool has_page = false;
+
+ // for all slices
+ const mi_slice_t* end;
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ if (mi_slice_is_used(slice)) { // used page
+ // ensure used count is up to date and collect potential concurrent frees
+ mi_page_t* const page = mi_slice_to_page(slice);
+ _mi_page_free_collect(page, false);
+ if (mi_page_all_free(page)) {
+ // if this page is all free now, free it without adding to any queues (yet)
+ mi_assert_internal(page->next == NULL && page->prev==NULL);
+ _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
+ segment->abandoned--;
+ slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce!
+ mi_assert_internal(!mi_slice_is_used(slice));
+ if (slice->slice_count >= slices_needed) {
+ has_page = true;
+ }
+ }
+ else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) {
+ // a page has available free blocks of the right size
+ has_page = true;
+ }
+ }
+ else {
+ // empty span
+ if (slice->slice_count >= slices_needed) {
+ has_page = true;
+ }
+ }
+ slice = slice + slice->slice_count; // jump to the next span
+ }
+ return has_page;
+}
+
+// Reclaim an abandoned segment; returns NULL if the segment was freed
+// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
+static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
+ if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
+ // can be 0 still with abandoned_next, or already a thread id for segments outside an arena that are reclaimed on a free.
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id());
+ mi_assert_internal(segment->subproc == heap->tld->segments.subproc); // only reclaim within the same subprocess
+ mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
+ segment->abandoned_visits = 0;
+ segment->was_reclaimed = true;
+ tld->reclaim_count++;
+ mi_segments_track_size((long)mi_segment_size(segment), tld);
+ mi_assert_internal(segment->next == NULL);
+ _mi_stat_decrease(&tld->stats->segments_abandoned, 1);
+
+ // for all slices
+ const mi_slice_t* end;
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ if (mi_slice_is_used(slice)) {
+ // in use: reclaim the page in our heap
+ mi_page_t* page = mi_slice_to_page(slice);
+ mi_assert_internal(page->is_committed);
+ mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
+ mi_assert_internal(mi_page_heap(page) == NULL);
+ mi_assert_internal(page->next == NULL && page->prev==NULL);
+ _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
+ segment->abandoned--;
+ // get the target heap for this thread which has a matching heap tag (so we reclaim into a matching heap)
+ mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag); // allow custom heaps to separate objects
+ if (target_heap == NULL) {
+ target_heap = heap;
+ _mi_error_message(EFAULT, "page with tag %u cannot be reclaimed by a heap with the same tag (using heap tag %u instead)\n", page->heap_tag, heap->tag );
+ }
+ // associate the heap with this page, and allow heap thread delayed free again.
+ mi_page_set_heap(page, target_heap);
+ _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
+ _mi_page_free_collect(page, false); // ensure used count is up to date
+ if (mi_page_all_free(page)) {
+ // if everything free by now, free the page
+ slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing
+ }
+ else {
+ // otherwise reclaim it into the heap
+ _mi_page_reclaim(target_heap, page);
+ if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page) && heap == target_heap) {
+ if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
+ }
+ }
+ }
+ else {
+ // the span is free, add it to our page queues
+ slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalesceing
+ }
+ mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0);
+ slice = slice + slice->slice_count;
+ }
+
+ mi_assert(segment->abandoned == 0);
+ mi_assert_expensive(mi_segment_is_valid(segment, tld));
+ if (segment->used == 0) { // due to page_clear
+ mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed));
+ mi_segment_free(segment, false, tld);
+ return NULL;
+ }
+ else {
+ return segment;
+ }
+}
+
+
+// attempt to reclaim a particular segment (called from multi threaded free `alloc.c:mi_free_block_mt`)
+bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
+ if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned
+ if (segment->subproc != heap->tld->segments.subproc) return false; // only reclaim within the same subprocess
+ if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false; // don't reclaim between exclusive and non-exclusive arena's
+ const long target = _mi_option_get_fast(mi_option_target_segments_per_thread);
+ if (target > 0 && (size_t)target <= heap->tld->segments.count) return false; // don't reclaim if going above the target count
+
+ // don't reclaim more from a `free` call than half the current segments
+ // this is to prevent a pure free-ing thread to start owning too many segments
+ // (but not for out-of-arena segments as that is the main way to be reclaimed for those)
+ if (segment->memid.memkind == MI_MEM_ARENA && heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) {
+ return false;
+ }
+ if (_mi_arena_segment_clear_abandoned(segment)) { // atomically unabandon
+ mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
+ mi_assert_internal(res == segment);
+ return (res != NULL);
+ }
+ return false;
+}
+
+void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
+ mi_segment_t* segment;
+ mi_arena_field_cursor_t current;
+  _mi_arena_field_cursor_init(heap, tld->subproc, true /* visit all, blocking */, &current);
+  while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
+ }
+ _mi_arena_field_cursor_done(¤t);
+}
+
+
+static bool segment_count_is_within_target(mi_segments_tld_t* tld, size_t* ptarget) {
+ const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 0, 1024);
+ if (ptarget != NULL) { *ptarget = target; }
+ return (target == 0 || tld->count < target);
+}
+
+static long mi_segment_get_reclaim_tries(mi_segments_tld_t* tld) {
+ // limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
+ const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
+ if (perc <= 0) return 0;
+ const size_t total_count = mi_atomic_load_relaxed(&tld->subproc->abandoned_count);
+ if (total_count == 0) return 0;
+ const size_t relative_count = (total_count > 10000 ? (total_count / 100) * perc : (total_count * perc) / 100); // avoid overflow
+ long max_tries = (long)(relative_count <= 1 ? 1 : (relative_count > 1024 ? 1024 : relative_count));
+ if (max_tries < 8 && total_count > 8) { max_tries = 8; }
+ return max_tries;
+}
+
+static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld)
+{
+ *reclaimed = false;
+ long max_tries = mi_segment_get_reclaim_tries(tld);
+ if (max_tries <= 0) return NULL;
+
+ mi_segment_t* result = NULL;
+ mi_segment_t* segment = NULL;
+ mi_arena_field_cursor_t current;
+  _mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, &current);
+  while (segment_count_is_within_target(tld,NULL) && (max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
+ {
+ mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process
+ segment->abandoned_visits++;
+ // todo: should we respect numa affinity for abandoned reclaim? perhaps only for the first visit?
+ // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries
+ // Perhaps we can skip non-suitable ones in a better way?
+ bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
+ bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees)
+ if (segment->used == 0) {
+ // free the segment (by forced reclaim) to make it available to other threads.
+ // note1: we prefer to free a segment as that might lead to reclaiming another
+ // segment that is still partially used.
+ // note2: we could in principle optimize this by skipping reclaim and directly
+ // freeing but that would violate some invariants temporarily)
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
+ }
+ else if (has_page && is_suitable) {
+ // found a large enough free span, or a page of the right block_size with free space
+ // we return the result of reclaim (which is usually `segment`) as it might free
+ // the segment due to concurrent frees (in which case `NULL` is returned).
+ result = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
+ break;
+ }
+ else if (segment->abandoned_visits > 3 && is_suitable) {
+ // always reclaim on 3rd visit to limit the abandoned segment count.
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
+ }
+ else {
+ // otherwise, push on the visited list so it gets not looked at too quickly again
+ max_tries++; // don't count this as a try since it was not suitable
+ mi_segment_try_purge(segment, false /* true force? */); // force purge if needed as we may not visit soon again
+ _mi_arena_segment_mark_abandoned(segment);
+ }
+ }
+ _mi_arena_field_cursor_done(¤t);
+ return result;
+}
+
+// collect abandoned segments
+void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
+{
+ mi_segment_t* segment;
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, tld->subproc, force /* blocking? */, &current);
+ long max_tries = (force ? (long)mi_atomic_load_relaxed(&tld->subproc->abandoned_count) : 1024); // limit latency
+  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL)) {
+ mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees)
+ if (segment->used == 0) {
+ // free the segment (by forced reclaim) to make it available to other threads.
+ // note: we could in principle optimize this by skipping reclaim and directly
+ // freeing but that would violate some invariants temporarily)
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
+ }
+ else {
+ // otherwise, purge if needed and push on the visited list
+ // note: forced purge can be expensive if many threads are destroyed/created as in mstress.
+ mi_segment_try_purge(segment, force);
+ _mi_arena_segment_mark_abandoned(segment);
+ }
+ }
+ _mi_arena_field_cursor_done(¤t);
+}
+
+/* -----------------------------------------------------------
+ Force abandon a segment that is in use by our thread
+----------------------------------------------------------- */
+
+// force abandon a segment
+static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* tld)
+{
+ mi_assert_internal(!mi_segment_is_abandoned(segment));
+ mi_assert_internal(!segment->dont_free);
+
+ // ensure the segment does not get free'd underneath us (so we can check if a page has been freed in `mi_page_force_abandon`)
+ segment->dont_free = true;
+
+ // for all slices
+ const mi_slice_t* end;
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ if (mi_slice_is_used(slice)) {
+ // ensure used count is up to date and collect potential concurrent frees
+ mi_page_t* const page = mi_slice_to_page(slice);
+ _mi_page_free_collect(page, false);
+ {
+ // abandon the page if it is still in-use (this will free it if possible as well)
+ mi_assert_internal(segment->used > 0);
+ if (segment->used == segment->abandoned+1) {
+ // the last page.. abandon and return as the segment will be abandoned after this
+ // and we should no longer access it.
+ segment->dont_free = false;
+ _mi_page_force_abandon(page);
+ return;
+ }
+ else {
+ // abandon and continue
+ _mi_page_force_abandon(page);
+ // it might be freed, reset the slice (note: relies on coalesce setting the slice_offset)
+ slice = mi_slice_first(slice);
+ }
+ }
+ }
+ slice = slice + slice->slice_count;
+ }
+ segment->dont_free = false;
+ mi_assert(segment->used == segment->abandoned);
+ mi_assert(segment->used == 0);
+ if (segment->used == 0) { // paranoia
+ // all free now
+ mi_segment_free(segment, false, tld);
+ }
+ else {
+ // perform delayed purges
+ mi_segment_try_purge(segment, false /* force? */);
+ }
+}
+
+
+// try abandon segments.
+// this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
+static void mi_segments_try_abandon_to_target(mi_heap_t* heap, size_t target, mi_segments_tld_t* tld) {
+ if (target <= 1) return;
+ const size_t min_target = (target > 4 ? (target*3)/4 : target); // 75%
+ // todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages
+ for (int i = 0; i < 64 && tld->count >= min_target; i++) {
+ mi_page_t* page = heap->pages[MI_BIN_FULL].first;
+ while (page != NULL && mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX) {
+ page = page->next;
+ }
+ if (page==NULL) {
+ break;
+ }
+ mi_segment_t* segment = _mi_page_segment(page);
+ mi_segment_force_abandon(segment, tld);
+ mi_assert_internal(page != heap->pages[MI_BIN_FULL].first); // as it is just abandoned
+ }
+}
+
+// try abandon segments.
+// this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
+static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
+ // we call this when we are about to add a fresh segment so we should be under our target segment count.
+ size_t target = 0;
+ if (segment_count_is_within_target(tld, &target)) return;
+ mi_segments_try_abandon_to_target(heap, target, tld);
+}
+
+void mi_collect_reduce(size_t target_size) mi_attr_noexcept {
+ mi_collect(true);
+ mi_heap_t* heap = mi_heap_get_default();
+ mi_segments_tld_t* tld = &heap->tld->segments;
+ size_t target = target_size / MI_SEGMENT_SIZE;
+ if (target == 0) {
+ target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 1, 1024);
+ }
+ mi_segments_try_abandon_to_target(heap, target, tld);
+}
+
+/* -----------------------------------------------------------
+ Reclaim or allocate
+----------------------------------------------------------- */
+
+static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld)
+{
+ mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
+
+ // try to abandon some segments to increase reuse between threads
+ mi_segments_try_abandon(heap,tld);
+
+ // 1. try to reclaim an abandoned segment
+ bool reclaimed;
+ mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
+ if (reclaimed) {
+ // reclaimed the right page right into the heap
+ mi_assert_internal(segment != NULL);
+ return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks
+ }
+ else if (segment != NULL) {
+ // reclaimed a segment with a large enough empty span in it
+ return segment;
+ }
+ // 2. otherwise allocate a fresh segment
+ return mi_segment_alloc(0, 0, heap->arena_id, tld, NULL);
+}
+
+
+/* -----------------------------------------------------------
+ Page allocation
+----------------------------------------------------------- */
+
+static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld)
+{
+ mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE);
+
+ // find a free page
+ size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE));
+ size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE;
+ mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size);
+ mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
+ if (page==NULL) {
+ // no free page, allocate a new segment and try again
+ if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld) == NULL) {
+ // OOM or reclaimed a good page in the heap
+ return NULL;
+ }
+ else {
+ // otherwise try again
+ return mi_segments_page_alloc(heap, page_kind, required, block_size, tld);
+ }
+ }
+ mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size);
+ mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id());
+ mi_segment_try_purge(_mi_ptr_segment(page), false);
+ return page;
+}
+
+
+
+/* -----------------------------------------------------------
+ Huge page allocation
+----------------------------------------------------------- */
+
+static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld)
+{
+ mi_page_t* page = NULL;
+ mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,&page);
+ if (segment == NULL || page==NULL) return NULL;
+ mi_assert_internal(segment->used==1);
+ mi_assert_internal(mi_page_block_size(page) >= size);
+ #if MI_HUGE_PAGE_ABANDON
+ segment->thread_id = 0; // huge segments are immediately abandoned
+ #endif
+
+ // for huge pages we initialize the block_size as we may
+ // overallocate to accommodate large alignments.
+ size_t psize;
+ uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+ page->block_size = psize;
+ mi_assert_internal(page->is_huge);
+
+ // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
+ if (page_alignment > 0 && segment->allow_decommit) {
+ uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);
+ mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
+ mi_assert_internal(psize - (aligned_p - start) >= size);
+ uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
+ ptrdiff_t decommit_size = aligned_p - decommit_start;
+ _mi_os_reset(decommit_start, decommit_size); // note: cannot use segment_decommit on huge segments
+ }
+
+ return page;
+}
+
+#if MI_HUGE_PAGE_ABANDON
+// free huge block from another thread
+void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
+ // huge page segments are always abandoned and can be freed immediately by any thread
+ mi_assert_internal(segment->kind==MI_SEGMENT_HUGE);
+ mi_assert_internal(segment == _mi_page_segment(page));
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0);
+
+ // claim it and free
+ mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
+ // paranoia: if this it the last reference, the cas should always succeed
+ size_t expected_tid = 0;
+ if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) {
+ mi_block_set_next(page, block, page->free);
+ page->free = block;
+ page->used--;
+ page->is_zero_init = false;
+ mi_assert(page->used == 0);
+ mi_tld_t* tld = heap->tld;
+ _mi_segment_page_free(page, true, &tld->segments);
+ }
+#if (MI_DEBUG!=0)
+ else {
+ mi_assert_internal(false);
+ }
+#endif
+}
+
+#else
+// reset memory of a huge block from another thread
+void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
+ MI_UNUSED(page);
+ mi_assert_internal(segment->kind == MI_SEGMENT_HUGE);
+ mi_assert_internal(segment == _mi_page_segment(page));
+ mi_assert_internal(page->used == 1); // this is called just before the free
+ mi_assert_internal(page->free == NULL);
+ if (segment->allow_decommit) {
+ size_t csize = mi_usable_size(block);
+ if (csize > sizeof(mi_block_t)) {
+ csize = csize - sizeof(mi_block_t);
+ uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
+ _mi_os_reset(p, csize); // note: cannot use segment_decommit on huge segments
+ }
+ }
+}
+#endif
+
+/* -----------------------------------------------------------
+ Page allocation and free
+----------------------------------------------------------- */
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld) {
+ mi_page_t* page;
+ if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
+ mi_assert_internal(_mi_is_power_of_two(page_alignment));
+ mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
+ if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
+ page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld);
+ }
+ else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
+ page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld);
+ }
+ else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
+ page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld);
+ }
+ else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
+ page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld);
+ }
+ else {
+ page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld);
+ }
+ mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid));
+ mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
+ mi_assert_internal(page == NULL || _mi_page_segment(page)->subproc == tld->subproc);
+ return page;
+}
+
+
+/* -----------------------------------------------------------
+ Visit blocks in a segment (only used for abandoned segments)
+----------------------------------------------------------- */
+
+static bool mi_segment_visit_page(mi_page_t* page, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
+ mi_heap_area_t area;
+ _mi_heap_area_init(&area, page);
+ if (!visitor(NULL, &area, NULL, area.block_size, arg)) return false;
+ if (visit_blocks) {
+ return _mi_heap_area_visit_blocks(&area, page, visitor, arg);
+ }
+ else {
+ return true;
+ }
+}
+
+bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
+ const mi_slice_t* end;
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
+ while (slice < end) {
+ if (mi_slice_is_used(slice)) {
+ mi_page_t* const page = mi_slice_to_page(slice);
+ if (heap_tag < 0 || (int)page->heap_tag == heap_tag) {
+ if (!mi_segment_visit_page(page, visit_blocks, visitor, arg)) return false;
+ }
+ }
+ slice = slice + slice->slice_count;
+ }
+ return true;
+}
diff --git a/compat/mimalloc/stats.c b/compat/mimalloc/stats.c
new file mode 100644
index 00000000000000..36e8c9813edb09
--- /dev/null
+++ b/compat/mimalloc/stats.c
@@ -0,0 +1,633 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
+
+#include <string.h>  // memset
+
+#if defined(_MSC_VER) && (_MSC_VER < 1920)
+#pragma warning(disable:4204) // non-constant aggregate initializer
+#endif
+
+/* -----------------------------------------------------------
+ Statistics operations
+----------------------------------------------------------- */
+
+static bool mi_is_in_main(void* stat) {
+ return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
+ && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t)));
+}
+
+static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
+ if (amount == 0) return;
+ if mi_unlikely(mi_is_in_main(stat))
+ {
+ // add atomically (for abandoned pages)
+ int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount);
+ // if (stat == &_mi_stats_main.committed) { mi_assert_internal(current + amount >= 0); };
+ mi_atomic_maxi64_relaxed(&stat->peak, current + amount);
+ if (amount > 0) {
+ mi_atomic_addi64_relaxed(&stat->total,amount);
+ }
+ }
+ else {
+ // add thread local
+ stat->current += amount;
+ if (stat->current > stat->peak) { stat->peak = stat->current; }
+ if (amount > 0) { stat->total += amount; }
+ }
+}
+
+void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
+ if (mi_is_in_main(stat)) {
+ mi_atomic_addi64_relaxed( &stat->total, (int64_t)amount );
+ }
+ else {
+ stat->total += amount;
+ }
+}
+
+void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) {
+ mi_stat_update(stat, (int64_t)amount);
+}
+
+void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
+ mi_stat_update(stat, -((int64_t)amount));
+}
+
+
+static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) {
+ if (amount == 0) return;
+ if mi_unlikely(mi_is_in_main(stat))
+ {
+ // adjust atomically
+ mi_atomic_addi64_relaxed(&stat->current, amount);
+ mi_atomic_addi64_relaxed(&stat->total,amount);
+ }
+ else {
+ // adjust local
+ stat->current += amount;
+ stat->total += amount;
+ }
+}
+
+void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) {
+ mi_stat_adjust(stat, -((int64_t)amount));
+}
+
+
+// must be thread safe as it is called from stats_merge
+static void mi_stat_count_add_mt(mi_stat_count_t* stat, const mi_stat_count_t* src) {
+ if (stat==src) return;
+ mi_atomic_void_addi64_relaxed(&stat->total, &src->total);
+ const int64_t prev_current = mi_atomic_addi64_relaxed(&stat->current, src->current);
+
+ // Global current plus thread peak approximates new global peak
+ // note: peak scores do really not work across threads.
+ // we used to just add them together but that often overestimates in practice.
+ // similarly, max does not seem to work well. The current approach
+ // by Artem Kharytoniuk (@artem-lunarg) seems to work better, see PR#1112
+ // for a longer description.
+ mi_atomic_maxi64_relaxed(&stat->peak, prev_current + src->peak);
+}
+
+static void mi_stat_counter_add_mt(mi_stat_counter_t* stat, const mi_stat_counter_t* src) {
+ if (stat==src) return;
+ mi_atomic_void_addi64_relaxed(&stat->total, &src->total);
+}
+
+#define MI_STAT_COUNT(stat) mi_stat_count_add_mt(&stats->stat, &src->stat);
+#define MI_STAT_COUNTER(stat) mi_stat_counter_add_mt(&stats->stat, &src->stat);
+
+// must be thread safe as it is called from stats_merge
+static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
+ if (stats==src) return;
+
+ // copy all fields
+ MI_STAT_FIELDS()
+
+ #if MI_STAT>1
+ for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
+ mi_stat_count_add_mt(&stats->malloc_bins[i], &src->malloc_bins[i]);
+ }
+ #endif
+ for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
+ mi_stat_count_add_mt(&stats->page_bins[i], &src->page_bins[i]);
+ }
+}
+
+#undef MI_STAT_COUNT
+#undef MI_STAT_COUNTER
+
+/* -----------------------------------------------------------
+ Display statistics
+----------------------------------------------------------- */
+
+// unit > 0 : size in binary bytes
+// unit == 0: count as decimal
+// unit < 0 : count in binary
+static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) {
+ char buf[32]; buf[0] = 0;
+ int len = 32;
+ const char* suffix = (unit <= 0 ? " " : "B");
+ const int64_t base = (unit == 0 ? 1000 : 1024);
+ if (unit>0) n *= unit;
+
+ const int64_t pos = (n < 0 ? -n : n);
+ if (pos < base) {
+ if (n!=1 || suffix[0] != 'B') { // skip printing 1 B for the unit column
+ _mi_snprintf(buf, len, "%lld %-3s", (long long)n, (n==0 ? "" : suffix));
+ }
+ }
+ else {
+ int64_t divider = base;
+ const char* magnitude = "K";
+ if (pos >= divider*base) { divider *= base; magnitude = "M"; }
+ if (pos >= divider*base) { divider *= base; magnitude = "G"; }
+ const int64_t tens = (n / (divider/10));
+ const long whole = (long)(tens/10);
+ const long frac1 = (long)(tens%10);
+ char unitdesc[8];
+ _mi_snprintf(unitdesc, 8, "%s%s%s", magnitude, (base==1024 ? "i" : ""), suffix);
+ _mi_snprintf(buf, len, "%ld.%ld %-3s", whole, (frac1 < 0 ? -frac1 : frac1), unitdesc);
+ }
+ _mi_fprintf(out, arg, (fmt==NULL ? "%12s" : fmt), buf);
+}
+
+
+static void mi_print_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg) {
+ mi_printf_amount(n,unit,out,arg,NULL);
+}
+
+static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out, void* arg) {
+ if (unit==1) _mi_fprintf(out, arg, "%12s"," ");
+ else mi_print_amount(n,0,out,arg);
+}
+
+static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg, const char* notok ) {
+ _mi_fprintf(out, arg,"%10s:", msg);
+ if (unit != 0) {
+ if (unit > 0) {
+ mi_print_amount(stat->peak, unit, out, arg);
+ mi_print_amount(stat->total, unit, out, arg);
+ // mi_print_amount(stat->freed, unit, out, arg);
+ mi_print_amount(stat->current, unit, out, arg);
+ mi_print_amount(unit, 1, out, arg);
+ mi_print_count(stat->total, unit, out, arg);
+ }
+ else {
+ mi_print_amount(stat->peak, -1, out, arg);
+ mi_print_amount(stat->total, -1, out, arg);
+ // mi_print_amount(stat->freed, -1, out, arg);
+ mi_print_amount(stat->current, -1, out, arg);
+ if (unit == -1) {
+ _mi_fprintf(out, arg, "%24s", "");
+ }
+ else {
+ mi_print_amount(-unit, 1, out, arg);
+ mi_print_count((stat->total / -unit), 0, out, arg);
+ }
+ }
+ if (stat->current != 0) {
+ _mi_fprintf(out, arg, " ");
+ _mi_fprintf(out, arg, (notok == NULL ? "not all freed" : notok));
+ _mi_fprintf(out, arg, "\n");
+ }
+ else {
+ _mi_fprintf(out, arg, " ok\n");
+ }
+ }
+ else {
+ mi_print_amount(stat->peak, 1, out, arg);
+ mi_print_amount(stat->total, 1, out, arg);
+ _mi_fprintf(out, arg, "%11s", " "); // no freed
+ mi_print_amount(stat->current, 1, out, arg);
+ _mi_fprintf(out, arg, "\n");
+ }
+}
+
+static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) {
+ mi_stat_print_ex(stat, msg, unit, out, arg, NULL);
+}
+
+#if MI_STAT>1
+static void mi_stat_total_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) {
+ _mi_fprintf(out, arg, "%10s:", msg);
+ _mi_fprintf(out, arg, "%12s", " "); // no peak
+ mi_print_amount(stat->total, unit, out, arg);
+ _mi_fprintf(out, arg, "\n");
+}
+#endif
+
+static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) {
+ _mi_fprintf(out, arg, "%10s:", msg);
+ mi_print_amount(stat->total, -1, out, arg);
+ _mi_fprintf(out, arg, "\n");
+}
+
+
+static void mi_stat_average_print(size_t count, size_t total, const char* msg, mi_output_fun* out, void* arg) {
+ const int64_t avg_tens = (count == 0 ? 0 : (total*10 / count));
+ const long avg_whole = (long)(avg_tens/10);
+ const long avg_frac1 = (long)(avg_tens%10);
+ _mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1);
+}
+
+
+static void mi_print_header(mi_output_fun* out, void* arg ) {
+ _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "block ", "total# ");
+}
+
+#if MI_STAT>1
+static void mi_stats_print_bins(const mi_stat_count_t* bins, size_t max, const char* fmt, mi_output_fun* out, void* arg) {
+ bool found = false;
+ char buf[64];
+ for (size_t i = 0; i <= max; i++) {
+ if (bins[i].total > 0) {
+ found = true;
+ int64_t unit = _mi_bin_size((uint8_t)i);
+ _mi_snprintf(buf, 64, "%s %3lu", fmt, (long)i);
+ mi_stat_print(&bins[i], buf, unit, out, arg);
+ }
+ }
+ if (found) {
+ _mi_fprintf(out, arg, "\n");
+ mi_print_header(out, arg);
+ }
+}
+#endif
+
+
+
+//------------------------------------------------------------
+// Use an output wrapper for line-buffered output
+// (which is nice when using loggers etc.)
+//------------------------------------------------------------
+typedef struct buffered_s {
+ mi_output_fun* out; // original output function
+ void* arg; // and state
+ char* buf; // local buffer of at least size `count+1`
+ size_t used; // currently used chars `used <= count`
+ size_t count; // total chars available for output
+} buffered_t;
+
+static void mi_buffered_flush(buffered_t* buf) {
+ buf->buf[buf->used] = 0;
+ _mi_fputs(buf->out, buf->arg, NULL, buf->buf);
+ buf->used = 0;
+}
+
+static void mi_cdecl mi_buffered_out(const char* msg, void* arg) {
+ buffered_t* buf = (buffered_t*)arg;
+ if (msg==NULL || buf==NULL) return;
+ for (const char* src = msg; *src != 0; src++) {
+ char c = *src;
+ if (buf->used >= buf->count) mi_buffered_flush(buf);
+ mi_assert_internal(buf->used < buf->count);
+ buf->buf[buf->used++] = c;
+ if (c == '\n') mi_buffered_flush(buf);
+ }
+}
+
+//------------------------------------------------------------
+// Print statistics
+//------------------------------------------------------------
+
+static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) mi_attr_noexcept {
+ // wrap the output function to be line buffered
+ char buf[256];
+ buffered_t buffer = { out0, arg0, NULL, 0, 255 };
+ buffer.buf = buf;
+ mi_output_fun* out = &mi_buffered_out;
+ void* arg = &buffer;
+
+ // and print using that
+ mi_print_header(out,arg);
+ #if MI_STAT>1
+ mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "bin",out,arg);
+ #endif
+ #if MI_STAT
+ mi_stat_print(&stats->malloc_normal, "binned", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
+ // mi_stat_print(&stats->malloc_large, "large", (stats->malloc_large_count.total == 0 ? 1 : -1), out, arg);
+ mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 1 : -1), out, arg);
+ mi_stat_count_t total = { 0,0,0 };
+ mi_stat_count_add_mt(&total, &stats->malloc_normal);
+ // mi_stat_count_add(&total, &stats->malloc_large);
+ mi_stat_count_add_mt(&total, &stats->malloc_huge);
+ mi_stat_print_ex(&total, "total", 1, out, arg, "");
+ #endif
+ #if MI_STAT>1
+ mi_stat_total_print(&stats->malloc_requested, "malloc req", 1, out, arg);
+ _mi_fprintf(out, arg, "\n");
+ #endif
+ mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, "");
+ mi_stat_print_ex(&stats->committed, "committed", 1, out, arg, "");
+ mi_stat_counter_print(&stats->reset, "reset", out, arg );
+ mi_stat_counter_print(&stats->purged, "purged", out, arg );
+ mi_stat_print_ex(&stats->page_committed, "touched", 1, out, arg, "");
+ mi_stat_print(&stats->segments, "segments", -1, out, arg);
+ mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg);
+ mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg);
+ mi_stat_print(&stats->pages, "pages", -1, out, arg);
+ mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg);
+ mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg);
+ mi_stat_counter_print(&stats->pages_retire, "-retire", out, arg);
+ mi_stat_counter_print(&stats->arena_count, "arenas", out, arg);
+ // mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg);
+ mi_stat_counter_print(&stats->arena_rollback_count, "-rollback", out, arg);
+ mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg);
+ mi_stat_counter_print(&stats->commit_calls, "commits", out, arg);
+ mi_stat_counter_print(&stats->reset_calls, "resets", out, arg);
+ mi_stat_counter_print(&stats->purge_calls, "purges", out, arg);
+ mi_stat_counter_print(&stats->malloc_guarded_count, "guarded", out, arg);
+ mi_stat_print(&stats->threads, "threads", -1, out, arg);
+ mi_stat_average_print(stats->page_searches_count.total, stats->page_searches.total, "searches", out, arg);
+ _mi_fprintf(out, arg, "%10s: %5i\n", "numa nodes", _mi_os_numa_node_count());
+
+ size_t elapsed;
+ size_t user_time;
+ size_t sys_time;
+ size_t current_rss;
+ size_t peak_rss;
+ size_t current_commit;
+ size_t peak_commit;
+ size_t page_faults;
+ mi_process_info(&elapsed, &user_time, &sys_time, &current_rss, &peak_rss, &current_commit, &peak_commit, &page_faults);
+ _mi_fprintf(out, arg, "%10s: %5zu.%03zu s\n", "elapsed", elapsed/1000, elapsed%1000);
+ _mi_fprintf(out, arg, "%10s: user: %zu.%03zu s, system: %zu.%03zu s, faults: %zu, peak rss: ", "process",
+ user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, page_faults );
+ mi_printf_amount((int64_t)peak_rss, 1, out, arg, "%s");
+ if (peak_commit > 0) {
+ _mi_fprintf(out, arg, ", peak commit: ");
+ mi_printf_amount((int64_t)peak_commit, 1, out, arg, "%s");
+ }
+ _mi_fprintf(out, arg, "\n");
+}
+
+static mi_msecs_t mi_process_start; // = 0
+
+static mi_stats_t* mi_stats_get_default(void) {
+ mi_heap_t* heap = mi_heap_get_default();
+ return &heap->tld->stats;
+}
+
+static void mi_stats_merge_from(mi_stats_t* stats) {
+ if (stats != &_mi_stats_main) {
+ mi_stats_add(&_mi_stats_main, stats);
+ memset(stats, 0, sizeof(mi_stats_t));
+ }
+}
+
+void mi_stats_reset(void) mi_attr_noexcept {
+ mi_stats_t* stats = mi_stats_get_default();
+ if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); }
+ memset(&_mi_stats_main, 0, sizeof(mi_stats_t));
+ if (mi_process_start == 0) { mi_process_start = _mi_clock_start(); };
+}
+
+void mi_stats_merge(void) mi_attr_noexcept {
+ mi_stats_merge_from( mi_stats_get_default() );
+}
+
+void _mi_stats_merge_thread(mi_tld_t* tld) {
+ mi_stats_merge_from( &tld->stats );
+}
+
+void _mi_stats_done(mi_stats_t* stats) { // called from `mi_thread_done`
+ mi_stats_merge_from(stats);
+}
+
+void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept {
+ mi_stats_merge_from(mi_stats_get_default());
+ _mi_stats_print(&_mi_stats_main, out, arg);
+}
+
+void mi_stats_print(void* out) mi_attr_noexcept {
+ // for compatibility there is an `out` parameter (which can be `stdout` or `stderr`)
+ mi_stats_print_out((mi_output_fun*)out, NULL);
+}
+
+void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept {
+ _mi_stats_print(mi_stats_get_default(), out, arg);
+}
+
+
+// ----------------------------------------------------------------
+// Basic timer for convenience; use milli-seconds to avoid doubles
+// ----------------------------------------------------------------
+
+static mi_msecs_t mi_clock_diff;
+
+mi_msecs_t _mi_clock_now(void) {
+ return _mi_prim_clock_now();
+}
+
+mi_msecs_t _mi_clock_start(void) {
+ if (mi_clock_diff == 0.0) {
+ mi_msecs_t t0 = _mi_clock_now();
+ mi_clock_diff = _mi_clock_now() - t0;
+ }
+ return _mi_clock_now();
+}
+
+mi_msecs_t _mi_clock_end(mi_msecs_t start) {
+ mi_msecs_t end = _mi_clock_now();
+ return (end - start - mi_clock_diff);
+}
+
+
+// --------------------------------------------------------
+// Basic process statistics
+// --------------------------------------------------------
+
+mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept
+{
+ mi_process_info_t pinfo;
+ _mi_memzero_var(pinfo);
+ pinfo.elapsed = _mi_clock_end(mi_process_start);
+ pinfo.current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current));
+ pinfo.peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak));
+ pinfo.current_rss = pinfo.current_commit;
+ pinfo.peak_rss = pinfo.peak_commit;
+ pinfo.utime = 0;
+ pinfo.stime = 0;
+ pinfo.page_faults = 0;
+
+ _mi_prim_process_info(&pinfo);
+
+ if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX));
+ if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX));
+ if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.stime : PTRDIFF_MAX));
+ if (current_rss!=NULL) *current_rss = pinfo.current_rss;
+ if (peak_rss!=NULL) *peak_rss = pinfo.peak_rss;
+ if (current_commit!=NULL) *current_commit = pinfo.current_commit;
+ if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit;
+ if (page_faults!=NULL) *page_faults = pinfo.page_faults;
+}
+
+
+// --------------------------------------------------------
+// Return statistics
+// --------------------------------------------------------
+
+void mi_stats_get(size_t stats_size, mi_stats_t* stats) mi_attr_noexcept {
+ if (stats == NULL || stats_size == 0) return;
+ _mi_memzero(stats, stats_size);
+ const size_t size = (stats_size > sizeof(mi_stats_t) ? sizeof(mi_stats_t) : stats_size);
+ _mi_memcpy(stats, &_mi_stats_main, size);
+ stats->version = MI_STAT_VERSION;
+}
+
+
+// --------------------------------------------------------
+// Statistics in json format
+// --------------------------------------------------------
+
+typedef struct mi_heap_buf_s {
+ char* buf;
+ size_t size;
+ size_t used;
+ bool can_realloc;
+} mi_heap_buf_t;
+
+static bool mi_heap_buf_expand(mi_heap_buf_t* hbuf) {
+ if (hbuf==NULL) return false;
+ if (hbuf->buf != NULL && hbuf->size>0) {
+ hbuf->buf[hbuf->size-1] = 0;
+ }
+ if (hbuf->size > SIZE_MAX/2 || !hbuf->can_realloc) return false;
+ const size_t newsize = (hbuf->size == 0 ? mi_good_size(12*MI_KiB) : 2*hbuf->size);
+ char* const newbuf = (char*)mi_rezalloc(hbuf->buf, newsize);
+ if (newbuf == NULL) return false;
+ hbuf->buf = newbuf;
+ hbuf->size = newsize;
+ return true;
+}
+
+static void mi_heap_buf_print(mi_heap_buf_t* hbuf, const char* msg) {
+ if (msg==NULL || hbuf==NULL) return;
+ if (hbuf->used + 1 >= hbuf->size && !hbuf->can_realloc) return;
+ for (const char* src = msg; *src != 0; src++) {
+ char c = *src;
+ if (hbuf->used + 1 >= hbuf->size) {
+ if (!mi_heap_buf_expand(hbuf)) return;
+ }
+ mi_assert_internal(hbuf->used < hbuf->size);
+ hbuf->buf[hbuf->used++] = c;
+ }
+ mi_assert_internal(hbuf->used < hbuf->size);
+ hbuf->buf[hbuf->used] = 0;
+}
+
+static void mi_heap_buf_print_count_bin(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, size_t bin, bool add_comma) {
+ const size_t binsize = _mi_bin_size(bin);
+ const size_t pagesize = (binsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_SMALL_PAGE_SIZE :
+ (binsize <= MI_MEDIUM_OBJ_SIZE_MAX ? MI_MEDIUM_PAGE_SIZE :
+ #if MI_LARGE_PAGE_SIZE
+ (binsize <= MI_LARGE_OBJ_SIZE_MAX ? MI_LARGE_PAGE_SIZE : 0)
+ #else
+ 0
+ #endif
+ ));
+ char buf[128];
+ _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"block_size\": %zu, \"page_size\": %zu }%s\n", prefix, stat->total, stat->peak, stat->current, binsize, pagesize, (add_comma ? "," : ""));
+ buf[127] = 0;
+ mi_heap_buf_print(hbuf, buf);
+}
+
+static void mi_heap_buf_print_count(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, bool add_comma) {
+ char buf[128];
+ _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld }%s\n", prefix, stat->total, stat->peak, stat->current, (add_comma ? "," : ""));
+ buf[127] = 0;
+ mi_heap_buf_print(hbuf, buf);
+}
+
+static void mi_heap_buf_print_count_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_count_t* stat) {
+ char buf[128];
+ _mi_snprintf(buf, 128, " \"%s\": ", name);
+ buf[127] = 0;
+ mi_heap_buf_print(hbuf, buf);
+ mi_heap_buf_print_count(hbuf, "", stat, true);
+}
+
+static void mi_heap_buf_print_value(mi_heap_buf_t* hbuf, const char* name, int64_t val) {
+ char buf[128];
+ _mi_snprintf(buf, 128, " \"%s\": %lld,\n", name, val);
+ buf[127] = 0;
+ mi_heap_buf_print(hbuf, buf);
+}
+
+static void mi_heap_buf_print_size(mi_heap_buf_t* hbuf, const char* name, size_t val, bool add_comma) {
+ char buf[128];
+ _mi_snprintf(buf, 128, " \"%s\": %zu%s\n", name, val, (add_comma ? "," : ""));
+ buf[127] = 0;
+ mi_heap_buf_print(hbuf, buf);
+}
+
+static void mi_heap_buf_print_counter_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_counter_t* stat) {
+ mi_heap_buf_print_value(hbuf, name, stat->total);
+}
+
+#define MI_STAT_COUNT(stat) mi_heap_buf_print_count_value(&hbuf, #stat, &stats->stat);
+#define MI_STAT_COUNTER(stat) mi_heap_buf_print_counter_value(&hbuf, #stat, &stats->stat);
+
+char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept {
+ mi_heap_buf_t hbuf = { NULL, 0, 0, true };
+ if (output_size > 0 && output_buf != NULL) {
+ _mi_memzero(output_buf, output_size);
+ hbuf.buf = output_buf;
+ hbuf.size = output_size;
+ hbuf.can_realloc = false;
+ }
+ else {
+ if (!mi_heap_buf_expand(&hbuf)) return NULL;
+ }
+ mi_heap_buf_print(&hbuf, "{\n");
+ mi_heap_buf_print_value(&hbuf, "version", MI_STAT_VERSION);
+ mi_heap_buf_print_value(&hbuf, "mimalloc_version", MI_MALLOC_VERSION);
+
+ // process info
+ mi_heap_buf_print(&hbuf, " \"process\": {\n");
+ size_t elapsed;
+ size_t user_time;
+ size_t sys_time;
+ size_t current_rss;
+ size_t peak_rss;
+ size_t current_commit;
+ size_t peak_commit;
+ size_t page_faults;
+ mi_process_info(&elapsed, &user_time, &sys_time, &current_rss, &peak_rss, &current_commit, &peak_commit, &page_faults);
+ mi_heap_buf_print_size(&hbuf, "elapsed_msecs", elapsed, true);
+ mi_heap_buf_print_size(&hbuf, "user_msecs", user_time, true);
+ mi_heap_buf_print_size(&hbuf, "system_msecs", sys_time, true);
+ mi_heap_buf_print_size(&hbuf, "page_faults", page_faults, true);
+ mi_heap_buf_print_size(&hbuf, "rss_current", current_rss, true);
+ mi_heap_buf_print_size(&hbuf, "rss_peak", peak_rss, true);
+ mi_heap_buf_print_size(&hbuf, "commit_current", current_commit, true);
+ mi_heap_buf_print_size(&hbuf, "commit_peak", peak_commit, false);
+ mi_heap_buf_print(&hbuf, " },\n");
+
+ // statistics
+ mi_stats_t* stats = &_mi_stats_main;
+ MI_STAT_FIELDS()
+
+ // size bins
+ mi_heap_buf_print(&hbuf, " \"malloc_bins\": [\n");
+ for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
+ mi_heap_buf_print_count_bin(&hbuf, " ", &stats->malloc_bins[i], i, i!=MI_BIN_HUGE);
+ }
+ mi_heap_buf_print(&hbuf, " ],\n");
+ mi_heap_buf_print(&hbuf, " \"page_bins\": [\n");
+ for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
+ mi_heap_buf_print_count_bin(&hbuf, " ", &stats->page_bins[i], i, i!=MI_BIN_HUGE);
+ }
+ mi_heap_buf_print(&hbuf, " ]\n");
+ mi_heap_buf_print(&hbuf, "}\n");
+ return hbuf.buf;
+}
diff --git a/compat/mingw-posix.h b/compat/mingw-posix.h
index 2d989fd762474e..9158f89d89d239 100644
--- a/compat/mingw-posix.h
+++ b/compat/mingw-posix.h
@@ -193,8 +193,10 @@ int setitimer(int type, struct itimerval *in, struct itimerval *out);
int sigaction(int sig, struct sigaction *in, struct sigaction *out);
int link(const char *oldpath, const char *newpath);
int uname(struct utsname *buf);
-int symlink(const char *target, const char *link);
int readlink(const char *path, char *buf, size_t bufsiz);
+struct index_state;
+int mingw_create_symlink(struct index_state *index, const char *target, const char *link);
+#define create_symlink mingw_create_symlink
/*
* replacements of existing functions
@@ -288,6 +290,11 @@ int mingw_socket(int domain, int type, int protocol);
int mingw_connect(int sockfd, struct sockaddr *sa, size_t sz);
#define connect mingw_connect
+char *mingw_strerror(int errnum);
+#ifndef _UCRT
+#define strerror mingw_strerror
+#endif
+
int mingw_bind(int sockfd, struct sockaddr *sa, size_t sz);
#define bind mingw_bind
@@ -333,6 +340,17 @@ static inline int getrlimit(int resource, struct rlimit *rlp)
return 0;
}
+/*
+ * The unit of FILETIME is 100-nanoseconds since January 1, 1601, UTC.
+ * Returns the 100-nanoseconds ("hekto nanoseconds") since the epoch.
+ */
+static inline long long filetime_to_hnsec(const FILETIME *ft)
+{
+ long long winTime = ((long long)ft->dwHighDateTime << 32) + ft->dwLowDateTime;
+ /* Windows to Unix Epoch conversion */
+ return winTime - 116444736000000000LL;
+}
+
/*
* Use mingw specific stat()/lstat()/fstat() implementations on Windows,
* including our own struct stat with 64 bit st_size and nanosecond-precision
@@ -349,6 +367,13 @@ struct timespec {
#endif
#endif
+static inline void filetime_to_timespec(const FILETIME *ft, struct timespec *ts)
+{
+ long long hnsec = filetime_to_hnsec(ft);
+ ts->tv_sec = (time_t)(hnsec / 10000000);
+ ts->tv_nsec = (hnsec % 10000000) * 100;
+}
+
struct mingw_stat {
_dev_t st_dev;
_ino_t st_ino;
@@ -381,7 +406,7 @@ int mingw_fstat(int fd, struct stat *buf);
#ifdef lstat
#undef lstat
#endif
-#define lstat mingw_lstat
+extern int (*lstat)(const char *file_name, struct stat *buf);
int mingw_utime(const char *file_name, const struct utimbuf *times);
diff --git a/compat/mingw.c b/compat/mingw.c
index c667a2dcda7ac7..eb7acbefbfa9ac 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -4,17 +4,24 @@
#include "git-compat-util.h"
#include "abspath.h"
#include "alloc.h"
+#include "attr.h"
#include "config.h"
#include "dir.h"
#include "environment.h"
#include "gettext.h"
+#include "repository.h"
#include "run-command.h"
#include "strbuf.h"
+#include "string-list.h"
#include "symlinks.h"
#include "trace2.h"
#include "win32.h"
+#include "win32/exit-process.h"
+#include "win32/fscache.h"
#include "win32/lazyload.h"
+#include "win32/wsl.h"
#include "wrapper.h"
+#include "write-or-die.h"
#include
#include
#include
@@ -272,6 +279,28 @@ enum hide_dotfiles_type {
static enum hide_dotfiles_type hide_dotfiles = HIDE_DOTFILES_DOTGITONLY;
static char *unset_environment_variables;
+int core_fscache;
+
+int are_long_paths_enabled(void)
+{
+ /* default to `false` during initialization */
+ static const int fallback = 0;
+
+ static int enabled = -1;
+
+ if (enabled < 0) {
+ /* avoid infinite recursion */
+ if (!the_repository)
+ return fallback;
+
+ if (the_repository->config &&
+ the_repository->config->hash_initialized &&
+ repo_config_get_bool(the_repository, "core.longpaths", &enabled) < 0)
+ enabled = 0;
+ }
+
+ return enabled < 0 ? fallback : enabled;
+}
int mingw_core_config(const char *var, const char *value,
const struct config_context *ctx UNUSED,
@@ -285,6 +314,11 @@ int mingw_core_config(const char *var, const char *value,
return 0;
}
+ if (!strcmp(var, "core.fscache")) {
+ core_fscache = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "core.unsetenvvars")) {
if (!value)
return config_error_nonbool(var);
@@ -348,7 +382,7 @@ process_phantom_symlink(const wchar_t *wtarget, const wchar_t *wlink)
{
HANDLE hnd;
BY_HANDLE_FILE_INFORMATION fdata;
- wchar_t relative[MAX_PATH];
+ wchar_t relative[MAX_LONG_PATH];
const wchar_t *rel;
/* check that wlink is still a file symlink */
@@ -424,6 +458,54 @@ static void process_phantom_symlinks(void)
LeaveCriticalSection(&phantom_symlinks_cs);
}
+static int create_phantom_symlink(wchar_t *wtarget, wchar_t *wlink)
+{
+ int len;
+
+ /* create file symlink */
+ if (!CreateSymbolicLinkW(wlink, wtarget, symlink_file_flags)) {
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+ }
+
+ /* convert to directory symlink if target exists */
+ switch (process_phantom_symlink(wtarget, wlink)) {
+ case PHANTOM_SYMLINK_RETRY: {
+ /* if target doesn't exist, add to phantom symlinks list */
+ wchar_t wfullpath[MAX_LONG_PATH];
+ struct phantom_symlink_info *psi;
+
+ /* convert to absolute path to be independent of cwd */
+ len = GetFullPathNameW(wlink, MAX_LONG_PATH, wfullpath, NULL);
+ if (!len || len >= MAX_LONG_PATH) {
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+ }
+
+ /* over-allocate and fill phantom_symlink_info structure */
+ psi = xmalloc(sizeof(struct phantom_symlink_info) +
+ sizeof(wchar_t) * (len + wcslen(wtarget) + 2));
+ psi->wlink = (wchar_t *)(psi + 1);
+ wcscpy(psi->wlink, wfullpath);
+ psi->wtarget = psi->wlink + len + 1;
+ wcscpy(psi->wtarget, wtarget);
+
+ EnterCriticalSection(&phantom_symlinks_cs);
+ psi->next = phantom_symlinks;
+ phantom_symlinks = psi;
+ LeaveCriticalSection(&phantom_symlinks_cs);
+ break;
+ }
+ case PHANTOM_SYMLINK_DIRECTORY:
+ /* if we created a dir symlink, process other phantom symlinks */
+ process_phantom_symlinks();
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
/* Normalizes NT paths as returned by some low-level APIs. */
static wchar_t *normalize_ntpath(wchar_t *wbuf)
{
@@ -452,8 +534,8 @@ static wchar_t *normalize_ntpath(wchar_t *wbuf)
int mingw_unlink(const char *pathname, int handle_in_use_error)
{
int tries = 0;
- wchar_t wpathname[MAX_PATH];
- if (xutftowcs_path(wpathname, pathname) < 0)
+ wchar_t wpathname[MAX_LONG_PATH];
+ if (xutftowcs_long_path(wpathname, pathname) < 0)
return -1;
if (DeleteFileW(wpathname))
@@ -485,7 +567,7 @@ static int is_dir_empty(const wchar_t *wpath)
{
WIN32_FIND_DATAW findbuf;
HANDLE handle;
- wchar_t wbuf[MAX_PATH + 2];
+ wchar_t wbuf[MAX_LONG_PATH + 2];
wcscpy(wbuf, wpath);
wcscat(wbuf, L"\\*");
handle = FindFirstFileW(wbuf, &findbuf);
@@ -506,7 +588,7 @@ static int is_dir_empty(const wchar_t *wpath)
int mingw_rmdir(const char *pathname)
{
int tries = 0;
- wchar_t wpathname[MAX_PATH];
+ wchar_t wpathname[MAX_LONG_PATH];
struct stat st;
/*
@@ -528,7 +610,7 @@ int mingw_rmdir(const char *pathname)
return -1;
}
- if (xutftowcs_path(wpathname, pathname) < 0)
+ if (xutftowcs_long_path(wpathname, pathname) < 0)
return -1;
do {
@@ -597,15 +679,18 @@ static int set_hidden_flag(const wchar_t *path, int set)
int mingw_mkdir(const char *path, int mode UNUSED)
{
int ret;
- wchar_t wpath[MAX_PATH];
+ wchar_t wpath[MAX_LONG_PATH];
if (!is_valid_win32_path(path, 0)) {
errno = EINVAL;
return -1;
}
- if (xutftowcs_path(wpath, path) < 0)
+ /* CreateDirectoryW path limit is 248 (MAX_PATH - 8.3 file name) */
+ if (xutftowcs_path_ex(wpath, path, MAX_LONG_PATH, -1, 248,
+ are_long_paths_enabled()) < 0)
return -1;
+
ret = _wmkdir(wpath);
if (!ret)
process_phantom_symlinks();
@@ -766,11 +851,12 @@ static int is_local_named_pipe_path(const char *filename)
int mingw_open (const char *filename, int oflags, ...)
{
+ static int append_atomically = -1;
typedef int (*open_fn_t)(wchar_t const *wfilename, int oflags, ...);
va_list args;
unsigned mode;
int fd, create = (oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL);
- wchar_t wfilename[MAX_PATH];
+ wchar_t wfilename[MAX_LONG_PATH];
open_fn_t open_fn;
WIN32_FILE_ATTRIBUTE_DATA fdata;
@@ -785,7 +871,16 @@ int mingw_open (const char *filename, int oflags, ...)
return -1;
}
- if ((oflags & O_APPEND) && !is_local_named_pipe_path(filename))
+ /*
+ * Only set append_atomically to default value(1) when repo is initialized
+ * and fail to get config value
+ */
+ if (append_atomically < 0 && the_repository && the_repository->commondir &&
+ repo_config_get_bool(the_repository, "windows.appendatomically", &append_atomically))
+ append_atomically = 1;
+
+ if (append_atomically && (oflags & O_APPEND) &&
+ !is_local_named_pipe_path(filename))
open_fn = mingw_open_append;
else if (!(oflags & ~(O_ACCMODE | O_NOINHERIT)))
open_fn = mingw_open_existing;
@@ -794,7 +889,7 @@ int mingw_open (const char *filename, int oflags, ...)
if (filename && !strcmp(filename, "/dev/null"))
wcscpy(wfilename, L"nul");
- else if (xutftowcs_path(wfilename, filename) < 0)
+ else if (xutftowcs_long_path(wfilename, filename) < 0)
return -1;
/*
@@ -827,6 +922,11 @@ int mingw_open (const char *filename, int oflags, ...)
if (fd < 0 && create && GetLastError() == ERROR_ACCESS_DENIED &&
INIT_PROC_ADDR(RtlGetLastNtStatus) && RtlGetLastNtStatus() == STATUS_DELETE_PENDING)
errno = EEXIST;
+ else if ((oflags & O_CREAT) && fd >= 0 && are_wsl_compatible_mode_bits_enabled()) {
+ _mode_t wsl_mode = S_IFREG | (mode&0777);
+ set_wsl_mode_bits_by_handle((HANDLE)_get_osfhandle(fd), wsl_mode);
+ }
+
if (fd < 0 && (oflags & O_ACCMODE) != O_RDONLY && errno == EACCES) {
DWORD attrs = GetFileAttributesW(wfilename);
if (attrs != INVALID_FILE_ATTRIBUTES && (attrs & FILE_ATTRIBUTE_DIRECTORY))
@@ -880,14 +980,14 @@ FILE *mingw_fopen (const char *filename, const char *otype)
{
int hide = needs_hiding(filename);
FILE *file;
- wchar_t wfilename[MAX_PATH], wotype[4];
+ wchar_t wfilename[MAX_LONG_PATH], wotype[4];
if (filename && !strcmp(filename, "/dev/null"))
wcscpy(wfilename, L"nul");
else if (!is_valid_win32_path(filename, 1)) {
int create = otype && strchr(otype, 'w');
errno = create ? EINVAL : ENOENT;
return NULL;
- } else if (xutftowcs_path(wfilename, filename) < 0)
+ } else if (xutftowcs_long_path(wfilename, filename) < 0)
return NULL;
if (xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0)
@@ -909,14 +1009,14 @@ FILE *mingw_freopen (const char *filename, const char *otype, FILE *stream)
{
int hide = needs_hiding(filename);
FILE *file;
- wchar_t wfilename[MAX_PATH], wotype[4];
+ wchar_t wfilename[MAX_LONG_PATH], wotype[4];
if (filename && !strcmp(filename, "/dev/null"))
wcscpy(wfilename, L"nul");
else if (!is_valid_win32_path(filename, 1)) {
int create = otype && strchr(otype, 'w');
errno = create ? EINVAL : ENOENT;
return NULL;
- } else if (xutftowcs_path(wfilename, filename) < 0)
+ } else if (xutftowcs_long_path(wfilename, filename) < 0)
return NULL;
if (xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0)
@@ -959,14 +1059,33 @@ ssize_t mingw_write(int fd, const void *buf, size_t len)
{
ssize_t result = write(fd, buf, len);
- if (result < 0 && (errno == EINVAL || errno == ENOSPC) && buf) {
+ if (result < 0 && (errno == EINVAL || errno == EBADF || errno == ENOSPC) && buf) {
int orig = errno;
/* check if fd is a pipe */
HANDLE h = (HANDLE) _get_osfhandle(fd);
- if (GetFileType(h) != FILE_TYPE_PIPE)
+ if (GetFileType(h) != FILE_TYPE_PIPE) {
+ if (orig == EINVAL) {
+ wchar_t path[MAX_LONG_PATH];
+ DWORD ret = GetFinalPathNameByHandleW(h, path,
+ ARRAY_SIZE(path), 0);
+ UINT drive_type = ret > 0 && ret < ARRAY_SIZE(path) ?
+ GetDriveTypeW(path) : DRIVE_UNKNOWN;
+
+ /*
+ * The default atomic append causes such an error on
+ * network file systems, in such a case, it should be
+ * turned off via config.
+ *
+ * `drive_type` of UNC path: DRIVE_NO_ROOT_DIR
+ */
+ if (DRIVE_NO_ROOT_DIR == drive_type || DRIVE_REMOTE == drive_type)
+ warning("invalid write operation detected; you may try:\n"
+ "\n\tgit config windows.appendAtomically false");
+ }
+
errno = orig;
- else if (orig == EINVAL)
+ } else if (orig == EINVAL || errno == EBADF)
errno = EPIPE;
else {
DWORD buf_size;
@@ -984,20 +1103,23 @@ ssize_t mingw_write(int fd, const void *buf, size_t len)
int mingw_access(const char *filename, int mode)
{
- wchar_t wfilename[MAX_PATH];
+ wchar_t wfilename[MAX_LONG_PATH];
if (!strcmp("nul", filename) || !strcmp("/dev/null", filename))
return 0;
- if (xutftowcs_path(wfilename, filename) < 0)
+ if (xutftowcs_long_path(wfilename, filename) < 0)
return -1;
/* X_OK is not supported by the MSVCRT version */
return _waccess(wfilename, mode & ~X_OK);
}
+/* cached length of current directory for handle_long_path */
+static int current_directory_len = 0;
+
int mingw_chdir(const char *dirname)
{
- wchar_t wdirname[MAX_PATH];
-
- if (xutftowcs_path(wdirname, dirname) < 0)
+ int result;
+ wchar_t wdirname[MAX_LONG_PATH];
+ if (xutftowcs_long_path(wdirname, dirname) < 0)
return -1;
if (has_symlinks) {
@@ -1016,35 +1138,19 @@ int mingw_chdir(const char *dirname)
CloseHandle(hnd);
}
- return _wchdir(normalize_ntpath(wdirname));
+ result = _wchdir(normalize_ntpath(wdirname));
+ current_directory_len = GetCurrentDirectoryW(0, NULL);
+ return result;
}
int mingw_chmod(const char *filename, int mode)
{
- wchar_t wfilename[MAX_PATH];
- if (xutftowcs_path(wfilename, filename) < 0)
+ wchar_t wfilename[MAX_LONG_PATH];
+ if (xutftowcs_long_path(wfilename, filename) < 0)
return -1;
return _wchmod(wfilename, mode);
}
-/*
- * The unit of FILETIME is 100-nanoseconds since January 1, 1601, UTC.
- * Returns the 100-nanoseconds ("hekto nanoseconds") since the epoch.
- */
-static inline long long filetime_to_hnsec(const FILETIME *ft)
-{
- long long winTime = ((long long)ft->dwHighDateTime << 32) + ft->dwLowDateTime;
- /* Windows to Unix Epoch conversion */
- return winTime - 116444736000000000LL;
-}
-
-static inline void filetime_to_timespec(const FILETIME *ft, struct timespec *ts)
-{
- long long hnsec = filetime_to_hnsec(ft);
- ts->tv_sec = (time_t)(hnsec / 10000000);
- ts->tv_nsec = (hnsec % 10000000) * 100;
-}
-
/**
* Verifies that safe_create_leading_directories() would succeed.
*/
@@ -1174,8 +1280,8 @@ int mingw_lstat(const char *file_name, struct stat *buf)
WIN32_FILE_ATTRIBUTE_DATA fdata;
DWORD reparse_tag = 0;
int link_len = 0;
- wchar_t wfilename[MAX_PATH];
- int wlen = xutftowcs_path(wfilename, file_name);
+ wchar_t wfilename[MAX_LONG_PATH];
+ int wlen = xutftowcs_long_path(wfilename, file_name);
if (wlen < 0)
return -1;
@@ -1190,7 +1296,7 @@ int mingw_lstat(const char *file_name, struct stat *buf)
if (GetFileAttributesExW(wfilename, GetFileExInfoStandard, &fdata)) {
/* for reparse points, get the link tag and length */
if (fdata.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) {
- char tmpbuf[MAX_PATH];
+ char tmpbuf[MAX_LONG_PATH];
if (read_reparse_point(wfilename, FALSE, tmpbuf,
&link_len, &reparse_tag) < 0)
@@ -1201,13 +1307,18 @@ int mingw_lstat(const char *file_name, struct stat *buf)
buf->st_uid = 0;
buf->st_nlink = 1;
buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes,
- reparse_tag);
+ reparse_tag, file_name);
buf->st_size = S_ISLNK(buf->st_mode) ? link_len :
fdata.nFileSizeLow | (((off_t) fdata.nFileSizeHigh) << 32);
buf->st_dev = buf->st_rdev = 0; /* not used by Git */
filetime_to_timespec(&(fdata.ftLastAccessTime), &(buf->st_atim));
filetime_to_timespec(&(fdata.ftLastWriteTime), &(buf->st_mtim));
filetime_to_timespec(&(fdata.ftCreationTime), &(buf->st_ctim));
+ if (S_ISREG(buf->st_mode) &&
+ are_wsl_compatible_mode_bits_enabled()) {
+ copy_wsl_mode_bits_from_disk(wfilename, -1,
+ &buf->st_mode);
+ }
return 0;
}
@@ -1237,6 +1348,8 @@ int mingw_lstat(const char *file_name, struct stat *buf)
return -1;
}
+int (*lstat)(const char *file_name, struct stat *buf) = mingw_lstat;
+
static int get_file_info_by_handle(HANDLE hnd, struct stat *buf)
{
BY_HANDLE_FILE_INFORMATION fdata;
@@ -1250,24 +1363,26 @@ static int get_file_info_by_handle(HANDLE hnd, struct stat *buf)
buf->st_gid = 0;
buf->st_uid = 0;
buf->st_nlink = 1;
- buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes, 0);
+ buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes, 0, NULL);
buf->st_size = fdata.nFileSizeLow |
(((off_t)fdata.nFileSizeHigh)<<32);
buf->st_dev = buf->st_rdev = 0; /* not used by Git */
filetime_to_timespec(&(fdata.ftLastAccessTime), &(buf->st_atim));
filetime_to_timespec(&(fdata.ftLastWriteTime), &(buf->st_mtim));
filetime_to_timespec(&(fdata.ftCreationTime), &(buf->st_ctim));
+ if (are_wsl_compatible_mode_bits_enabled())
+ get_wsl_mode_bits_by_handle(hnd, &buf->st_mode);
return 0;
}
int mingw_stat(const char *file_name, struct stat *buf)
{
- wchar_t wfile_name[MAX_PATH];
+ wchar_t wfile_name[MAX_LONG_PATH];
HANDLE hnd;
int result;
/* open the file and let Windows resolve the links */
- if (xutftowcs_path(wfile_name, file_name) < 0)
+ if (xutftowcs_long_path(wfile_name, file_name) < 0)
return -1;
hnd = CreateFileW(wfile_name, 0,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL,
@@ -1335,10 +1450,10 @@ int mingw_utime (const char *file_name, const struct utimbuf *times)
FILETIME mft, aft;
int rc;
DWORD attrs;
- wchar_t wfilename[MAX_PATH];
+ wchar_t wfilename[MAX_LONG_PATH];
HANDLE osfilehandle;
- if (xutftowcs_path(wfilename, file_name) < 0)
+ if (xutftowcs_long_path(wfilename, file_name) < 0)
return -1;
/* must have write permission */
@@ -1394,6 +1509,9 @@ int mingw_utime (const char *file_name, const struct utimbuf *times)
size_t mingw_strftime(char *s, size_t max,
const char *format, const struct tm *tm)
{
+#ifdef _UCRT
+ size_t ret = strftime(s, max, format, tm);
+#else
/* a pointer to the original strftime in case we can't find the UCRT version */
static size_t (*fallback)(char *, size_t, const char *, const struct tm *) = strftime;
size_t ret;
@@ -1404,6 +1522,7 @@ size_t mingw_strftime(char *s, size_t max,
ret = strftime(s, max, format, tm);
else
ret = fallback(s, max, format, tm);
+#endif
if (!ret && errno == EINVAL)
die("invalid strftime format: '%s'", format);
@@ -1473,6 +1592,82 @@ struct tm *localtime_r(const time_t *timep, struct tm *result)
}
#endif
+char *mingw_strbuf_realpath(struct strbuf *resolved, const char *path)
+{
+ wchar_t wpath[MAX_PATH];
+ HANDLE h;
+ DWORD ret;
+ int len;
+ const char *last_component = NULL;
+ char *append = NULL;
+
+ if (xutftowcs_path(wpath, path) < 0)
+ return NULL;
+
+ h = CreateFileW(wpath, 0,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL,
+ OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+
+ /*
+ * strbuf_realpath() allows the last path component to not exist. If
+ * that is the case, now it's time to try without the last component.
+ */
+ if (h == INVALID_HANDLE_VALUE &&
+ GetLastError() == ERROR_FILE_NOT_FOUND) {
+ /* cut last component off of `wpath` */
+ wchar_t *p = wpath + wcslen(wpath);
+
+ while (p != wpath)
+ if (*(--p) == L'/' || *p == L'\\')
+ break; /* found start of last component */
+
+ if (p != wpath && (last_component = find_last_dir_sep(path))) {
+ append = xstrdup(last_component + 1); /* skip directory separator */
+ /*
+ * Do not strip the trailing slash at the drive root, otherwise
+ * the path would be e.g. `C:` (which resolves to the
+ * _current_ directory on that drive).
+ */
+ if (p[-1] == L':')
+ p[1] = L'\0';
+ else
+ *p = L'\0';
+ h = CreateFileW(wpath, 0, FILE_SHARE_READ |
+ FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL, OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS, NULL);
+ }
+ }
+
+ if (h == INVALID_HANDLE_VALUE) {
+realpath_failed:
+ FREE_AND_NULL(append);
+ return NULL;
+ }
+
+ ret = GetFinalPathNameByHandleW(h, wpath, ARRAY_SIZE(wpath), 0);
+ CloseHandle(h);
+ if (!ret || ret >= ARRAY_SIZE(wpath))
+ goto realpath_failed;
+
+ len = wcslen(wpath) * 3;
+ strbuf_grow(resolved, len);
+ len = xwcstoutf(resolved->buf, normalize_ntpath(wpath), len);
+ if (len < 0)
+ goto realpath_failed;
+ resolved->len = len;
+
+ if (append) {
+ /* Use forward-slash, like `normalize_ntpath()` */
+ strbuf_complete(resolved, '/');
+ strbuf_addstr(resolved, append);
+ FREE_AND_NULL(append);
+ }
+
+ return resolved->buf;
+
+}
+
char *mingw_getcwd(char *pointer, int len)
{
wchar_t cwd[MAX_PATH], wpointer[MAX_PATH];
@@ -1489,8 +1684,13 @@ char *mingw_getcwd(char *pointer, int len)
if (hnd != INVALID_HANDLE_VALUE) {
ret = GetFinalPathNameByHandleW(hnd, wpointer, ARRAY_SIZE(wpointer), 0);
CloseHandle(hnd);
- if (!ret || ret >= ARRAY_SIZE(wpointer))
- return NULL;
+ if (!ret || ret >= ARRAY_SIZE(wpointer)) {
+ ret = GetLongPathNameW(cwd, wpointer, ARRAY_SIZE(wpointer));
+ if (!ret || ret >= ARRAY_SIZE(wpointer)) {
+ errno = ret ? ENAMETOOLONG : err_win_to_posix(GetLastError());
+ return NULL;
+ }
+ }
if (xwcstoutf(pointer, normalize_ntpath(wpointer), len) < 0)
return NULL;
return pointer;
@@ -1601,7 +1801,7 @@ static const char *quote_arg_msys2(const char *arg)
static const char *parse_interpreter(const char *cmd)
{
- static char buf[100];
+ static char buf[MAX_PATH];
char *p, *opt;
ssize_t n; /* read() can return negative values */
int fd;
@@ -1661,6 +1861,65 @@ static char *lookup_prog(const char *dir, int dirlen, const char *cmd,
return NULL;
}
+static char *path_lookup(const char *cmd, int exe_only);
+
+static char *is_busybox_applet(const char *cmd)
+{
+ static struct string_list applets = STRING_LIST_INIT_DUP;
+ static char *busybox_path;
+ static int busybox_path_initialized;
+
+ /* Avoid infinite loop */
+ if (!strncasecmp(cmd, "busybox", 7) &&
+ (!cmd[7] || !strcasecmp(cmd + 7, ".exe")))
+ return NULL;
+
+ if (!busybox_path_initialized) {
+ busybox_path = path_lookup("busybox.exe", 1);
+ busybox_path_initialized = 1;
+ }
+
+ /* Assume that sh is compiled in... */
+ if (!busybox_path || !strcasecmp(cmd, "sh"))
+ return xstrdup_or_null(busybox_path);
+
+ if (!applets.nr) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ char *p;
+
+ strvec_pushl(&cp.args, busybox_path, "--help", NULL);
+
+ if (capture_command(&cp, &buf, 2048)) {
+ string_list_append(&applets, "");
+ return NULL;
+ }
+
+ /* parse output */
+ p = strstr(buf.buf, "Currently defined functions:\n");
+ if (!p) {
+ warning("Could not parse output of busybox --help");
+ string_list_append(&applets, "");
+ return NULL;
+ }
+ p = strchrnul(p, '\n');
+ for (;;) {
+ size_t len;
+
+ p += strspn(p, "\n\t ,");
+ len = strcspn(p, "\n\t ,");
+ if (!len)
+ break;
+ p[len] = '\0';
+ string_list_insert(&applets, p);
+ p = p + len + 1;
+ }
+ }
+
+ return string_list_has_string(&applets, cmd) ?
+ xstrdup(busybox_path) : NULL;
+}
+
/*
* Determines the absolute path of cmd using the split path in path.
* If cmd contains a slash or backslash, no lookup is performed.
@@ -1689,6 +1948,9 @@ static char *path_lookup(const char *cmd, int exe_only)
path = sep + 1;
}
+ if (!prog && !isexe)
+ prog = is_busybox_applet(cmd);
+
return prog;
}
@@ -1892,8 +2154,8 @@ static int is_msys2_sh(const char *cmd)
}
static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaenv,
- const char *dir,
- int prepend_cmd, int fhin, int fhout, int fherr)
+ const char *dir, const char *prepend_cmd,
+ int fhin, int fhout, int fherr)
{
STARTUPINFOEXW si;
PROCESS_INFORMATION pi;
@@ -1961,6 +2223,10 @@ static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaen
if (*argv && !strcmp(cmd, *argv))
wcmd[0] = L'\0';
+ /*
+ * Paths to executables and to the current directory do not support
+ * long paths, therefore we cannot use xutftowcs_long_path() here.
+ */
else if (xutftowcs_path(wcmd, cmd) < 0)
return -1;
if (dir && xutftowcs_path(wdir, dir) < 0)
@@ -1969,9 +2235,9 @@ static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaen
/* concatenate argv, quoting args as we go */
strbuf_init(&args, 0);
if (prepend_cmd) {
- char *quoted = (char *)quote_arg(cmd);
+ char *quoted = (char *)quote_arg(prepend_cmd);
strbuf_addstr(&args, quoted);
- if (quoted != cmd)
+ if (quoted != prepend_cmd)
free(quoted);
}
for (; *argv; argv++) {
@@ -2091,7 +2357,8 @@ static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaen
return (pid_t)pi.dwProcessId;
}
-static pid_t mingw_spawnv(const char *cmd, const char **argv, int prepend_cmd)
+static pid_t mingw_spawnv(const char *cmd, const char **argv,
+ const char *prepend_cmd)
{
return mingw_spawnve_fd(cmd, argv, NULL, NULL, prepend_cmd, 0, 1, 2);
}
@@ -2119,14 +2386,14 @@ pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **deltaenv,
pid = -1;
}
else {
- pid = mingw_spawnve_fd(iprog, argv, deltaenv, dir, 1,
+ pid = mingw_spawnve_fd(iprog, argv, deltaenv, dir, interpr,
fhin, fhout, fherr);
free(iprog);
}
argv[0] = argv0;
}
else
- pid = mingw_spawnve_fd(prog, argv, deltaenv, dir, 0,
+ pid = mingw_spawnve_fd(prog, argv, deltaenv, dir, NULL,
fhin, fhout, fherr);
free(prog);
}
@@ -2151,7 +2418,7 @@ static int try_shell_exec(const char *cmd, char *const *argv)
argv2[0] = (char *)cmd; /* full path to the script file */
COPY_ARRAY(&argv2[1], &argv[1], argc);
exec_id = trace2_exec(prog, (const char **)argv2);
- pid = mingw_spawnv(prog, (const char **)argv2, 1);
+ pid = mingw_spawnv(prog, (const char **)argv2, interpr);
if (pid >= 0) {
int status;
if (waitpid(pid, &status, 0) < 0)
@@ -2175,7 +2442,7 @@ int mingw_execv(const char *cmd, char *const *argv)
int exec_id;
exec_id = trace2_exec(cmd, (const char **)argv);
- pid = mingw_spawnv(cmd, (const char **)argv, 0);
+ pid = mingw_spawnv(cmd, (const char **)argv, NULL);
if (pid < 0) {
trace2_exec_result(exec_id, -1);
return -1;
@@ -2204,16 +2471,28 @@ int mingw_execvp(const char *cmd, char *const *argv)
int mingw_kill(pid_t pid, int sig)
{
if (pid > 0 && sig == SIGTERM) {
- HANDLE h = OpenProcess(PROCESS_TERMINATE, FALSE, pid);
-
- if (TerminateProcess(h, -1)) {
+ HANDLE h = OpenProcess(PROCESS_CREATE_THREAD |
+ PROCESS_QUERY_INFORMATION |
+ PROCESS_VM_OPERATION | PROCESS_VM_WRITE |
+ PROCESS_VM_READ | PROCESS_TERMINATE,
+ FALSE, pid);
+ int ret;
+
+ if (h)
+ ret = exit_process(h, 128 + sig);
+ else {
+ h = OpenProcess(PROCESS_TERMINATE, FALSE, pid);
+ if (!h) {
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+ }
+ ret = terminate_process_tree(h, 128 + sig);
+ }
+ if (ret) {
+ errno = err_win_to_posix(GetLastError());
CloseHandle(h);
- return 0;
}
-
- errno = err_win_to_posix(GetLastError());
- CloseHandle(h);
- return -1;
+ return ret;
} else if (pid > 0 && sig == 0) {
HANDLE h = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid);
if (h) {
@@ -2337,18 +2616,235 @@ static void ensure_socket_initialization(void)
initialized = 1;
}
+static int winsock_error_to_errno(DWORD err)
+{
+ switch (err) {
+ case WSAEINTR: return EINTR;
+ case WSAEBADF: return EBADF;
+ case WSAEACCES: return EACCES;
+ case WSAEFAULT: return EFAULT;
+ case WSAEINVAL: return EINVAL;
+ case WSAEMFILE: return EMFILE;
+ case WSAEWOULDBLOCK: return EWOULDBLOCK;
+ case WSAEINPROGRESS: return EINPROGRESS;
+ case WSAEALREADY: return EALREADY;
+ case WSAENOTSOCK: return ENOTSOCK;
+ case WSAEDESTADDRREQ: return EDESTADDRREQ;
+ case WSAEMSGSIZE: return EMSGSIZE;
+ case WSAEPROTOTYPE: return EPROTOTYPE;
+ case WSAENOPROTOOPT: return ENOPROTOOPT;
+ case WSAEPROTONOSUPPORT: return EPROTONOSUPPORT;
+ case WSAEOPNOTSUPP: return EOPNOTSUPP;
+ case WSAEAFNOSUPPORT: return EAFNOSUPPORT;
+ case WSAEADDRINUSE: return EADDRINUSE;
+ case WSAEADDRNOTAVAIL: return EADDRNOTAVAIL;
+ case WSAENETDOWN: return ENETDOWN;
+ case WSAENETUNREACH: return ENETUNREACH;
+ case WSAENETRESET: return ENETRESET;
+ case WSAECONNABORTED: return ECONNABORTED;
+ case WSAECONNRESET: return ECONNRESET;
+ case WSAENOBUFS: return ENOBUFS;
+ case WSAEISCONN: return EISCONN;
+ case WSAENOTCONN: return ENOTCONN;
+ case WSAETIMEDOUT: return ETIMEDOUT;
+ case WSAECONNREFUSED: return ECONNREFUSED;
+ case WSAELOOP: return ELOOP;
+ case WSAENAMETOOLONG: return ENAMETOOLONG;
+ case WSAEHOSTUNREACH: return EHOSTUNREACH;
+ case WSAENOTEMPTY: return ENOTEMPTY;
+ /* No errno equivalent; default to EIO */
+ case WSAESOCKTNOSUPPORT:
+ case WSAEPFNOSUPPORT:
+ case WSAESHUTDOWN:
+ case WSAETOOMANYREFS:
+ case WSAEHOSTDOWN:
+ case WSAEPROCLIM:
+ case WSAEUSERS:
+ case WSAEDQUOT:
+ case WSAESTALE:
+ case WSAEREMOTE:
+ case WSASYSNOTREADY:
+ case WSAVERNOTSUPPORTED:
+ case WSANOTINITIALISED:
+ case WSAEDISCON:
+ case WSAENOMORE:
+ case WSAECANCELLED:
+ case WSAEINVALIDPROCTABLE:
+ case WSAEINVALIDPROVIDER:
+ case WSAEPROVIDERFAILEDINIT:
+ case WSASYSCALLFAILURE:
+ case WSASERVICE_NOT_FOUND:
+ case WSATYPE_NOT_FOUND:
+ case WSA_E_NO_MORE:
+ case WSA_E_CANCELLED:
+ case WSAEREFUSED:
+ case WSAHOST_NOT_FOUND:
+ case WSATRY_AGAIN:
+ case WSANO_RECOVERY:
+ case WSANO_DATA:
+ case WSA_QOS_RECEIVERS:
+ case WSA_QOS_SENDERS:
+ case WSA_QOS_NO_SENDERS:
+ case WSA_QOS_NO_RECEIVERS:
+ case WSA_QOS_REQUEST_CONFIRMED:
+ case WSA_QOS_ADMISSION_FAILURE:
+ case WSA_QOS_POLICY_FAILURE:
+ case WSA_QOS_BAD_STYLE:
+ case WSA_QOS_BAD_OBJECT:
+ case WSA_QOS_TRAFFIC_CTRL_ERROR:
+ case WSA_QOS_GENERIC_ERROR:
+ case WSA_QOS_ESERVICETYPE:
+ case WSA_QOS_EFLOWSPEC:
+ case WSA_QOS_EPROVSPECBUF:
+ case WSA_QOS_EFILTERSTYLE:
+ case WSA_QOS_EFILTERTYPE:
+ case WSA_QOS_EFILTERCOUNT:
+ case WSA_QOS_EOBJLENGTH:
+ case WSA_QOS_EFLOWCOUNT:
+#ifndef _MSC_VER
+ case WSA_QOS_EUNKNOWNPSOBJ:
+#endif
+ case WSA_QOS_EPOLICYOBJ:
+ case WSA_QOS_EFLOWDESC:
+ case WSA_QOS_EPSFLOWSPEC:
+ case WSA_QOS_EPSFILTERSPEC:
+ case WSA_QOS_ESDMODEOBJ:
+ case WSA_QOS_ESHAPERATEOBJ:
+ case WSA_QOS_RESERVED_PETYPE:
+ default: return EIO;
+ }
+}
+
+/*
+ * On Windows, `errno` is a macro that expands to a function call.
+ * This makes it difficult to debug and single-step our mappings.
+ */
+static inline void set_wsa_errno(void)
+{
+ DWORD wsa = WSAGetLastError();
+ int e = winsock_error_to_errno(wsa);
+ errno = e;
+
+#ifdef DEBUG_WSA_ERRNO
+ fprintf(stderr, "winsock error: %d -> %d\n", wsa, e);
+ fflush(stderr);
+#endif
+}
+
+static inline int winsock_return(int ret)
+{
+ if (ret < 0)
+ set_wsa_errno();
+
+ return ret;
+}
+
+#define WINSOCK_RETURN(x) do { return winsock_return(x); } while (0)
+
+#undef strerror
+char *mingw_strerror(int errnum)
+{
+ static char buf[41] ="";
+ switch (errnum) {
+ case EWOULDBLOCK:
+ xsnprintf(buf, 41, "%s", "Operation would block");
+ break;
+ case EINPROGRESS:
+ xsnprintf(buf, 41, "%s", "Operation now in progress");
+ break;
+ case EALREADY:
+ xsnprintf(buf, 41, "%s", "Operation already in progress");
+ break;
+ case ENOTSOCK:
+ xsnprintf(buf, 41, "%s", "Socket operation on non-socket");
+ break;
+ case EDESTADDRREQ:
+ xsnprintf(buf, 41, "%s", "Destination address required");
+ break;
+ case EMSGSIZE:
+ xsnprintf(buf, 41, "%s", "Message too long");
+ break;
+ case EPROTOTYPE:
+ xsnprintf(buf, 41, "%s", "Protocol wrong type for socket");
+ break;
+ case ENOPROTOOPT:
+ xsnprintf(buf, 41, "%s", "Protocol not available");
+ break;
+ case EPROTONOSUPPORT:
+ xsnprintf(buf, 41, "%s", "Protocol not supported");
+ break;
+ case EOPNOTSUPP:
+ xsnprintf(buf, 41, "%s", "Operation not supported");
+ break;
+ case EAFNOSUPPORT:
+ xsnprintf(buf, 41, "%s", "Address family not supported by protocol");
+ break;
+ case EADDRINUSE:
+ xsnprintf(buf, 41, "%s", "Address already in use");
+ break;
+ case EADDRNOTAVAIL:
+ xsnprintf(buf, 41, "%s", "Cannot assign requested address");
+ break;
+ case ENETDOWN:
+ xsnprintf(buf, 41, "%s", "Network is down");
+ break;
+ case ENETUNREACH:
+ xsnprintf(buf, 41, "%s", "Network is unreachable");
+ break;
+ case ENETRESET:
+ xsnprintf(buf, 41, "%s", "Network dropped connection on reset");
+ break;
+ case ECONNABORTED:
+ xsnprintf(buf, 41, "%s", "Software caused connection abort");
+ break;
+ case ECONNRESET:
+ xsnprintf(buf, 41, "%s", "Connection reset by peer");
+ break;
+ case ENOBUFS:
+ xsnprintf(buf, 41, "%s", "No buffer space available");
+ break;
+ case EISCONN:
+ xsnprintf(buf, 41, "%s", "Transport endpoint is already connected");
+ break;
+ case ENOTCONN:
+ xsnprintf(buf, 41, "%s", "Transport endpoint is not connected");
+ break;
+ case ETIMEDOUT:
+ xsnprintf(buf, 41, "%s", "Connection timed out");
+ break;
+ case ECONNREFUSED:
+ xsnprintf(buf, 41, "%s", "Connection refused");
+ break;
+ case ELOOP:
+ xsnprintf(buf, 41, "%s", "Too many levels of symbolic links");
+ break;
+ case EHOSTUNREACH:
+ xsnprintf(buf, 41, "%s", "No route to host");
+ break;
+ default: return strerror(errnum);
+ }
+ return buf;
+}
+
#undef gethostname
int mingw_gethostname(char *name, int namelen)
{
- ensure_socket_initialization();
- return gethostname(name, namelen);
+ ensure_socket_initialization();
+ WINSOCK_RETURN(gethostname(name, namelen));
}
#undef gethostbyname
struct hostent *mingw_gethostbyname(const char *host)
{
+ struct hostent *ret;
+
ensure_socket_initialization();
- return gethostbyname(host);
+
+ ret = gethostbyname(host);
+ if (!ret)
+ set_wsa_errno();
+
+ return ret;
}
#undef getaddrinfo
@@ -2356,7 +2852,7 @@ int mingw_getaddrinfo(const char *node, const char *service,
const struct addrinfo *hints, struct addrinfo **res)
{
ensure_socket_initialization();
- return getaddrinfo(node, service, hints, res);
+ WINSOCK_RETURN(getaddrinfo(node, service, hints, res));
}
int mingw_socket(int domain, int type, int protocol)
@@ -2367,16 +2863,7 @@ int mingw_socket(int domain, int type, int protocol)
ensure_socket_initialization();
s = WSASocket(domain, type, protocol, NULL, 0, 0);
if (s == INVALID_SOCKET) {
- /*
- * WSAGetLastError() values are regular BSD error codes
- * biased by WSABASEERR.
- * However, strerror() does not know about networking
- * specific errors, which are values beginning at 38 or so.
- * Therefore, we choose to leave the biased error code
- * in errno so that _if_ someone looks up the code somewhere,
- * then it is at least the number that are usually listed.
- */
- errno = WSAGetLastError();
+ set_wsa_errno();
return -1;
}
/* convert into a file descriptor */
@@ -2392,35 +2879,35 @@ int mingw_socket(int domain, int type, int protocol)
int mingw_connect(int sockfd, struct sockaddr *sa, size_t sz)
{
SOCKET s = (SOCKET)_get_osfhandle(sockfd);
- return connect(s, sa, sz);
+ WINSOCK_RETURN(connect(s, sa, sz));
}
#undef bind
int mingw_bind(int sockfd, struct sockaddr *sa, size_t sz)
{
SOCKET s = (SOCKET)_get_osfhandle(sockfd);
- return bind(s, sa, sz);
+ WINSOCK_RETURN(bind(s, sa, sz));
}
#undef setsockopt
int mingw_setsockopt(int sockfd, int lvl, int optname, void *optval, int optlen)
{
SOCKET s = (SOCKET)_get_osfhandle(sockfd);
- return setsockopt(s, lvl, optname, (const char*)optval, optlen);
+ WINSOCK_RETURN(setsockopt(s, lvl, optname, (const char*)optval, optlen));
}
#undef shutdown
int mingw_shutdown(int sockfd, int how)
{
SOCKET s = (SOCKET)_get_osfhandle(sockfd);
- return shutdown(s, how);
+ WINSOCK_RETURN(shutdown(s, how));
}
#undef listen
int mingw_listen(int sockfd, int backlog)
{
SOCKET s = (SOCKET)_get_osfhandle(sockfd);
- return listen(s, backlog);
+ WINSOCK_RETURN(listen(s, backlog));
}
#undef accept
@@ -2431,6 +2918,11 @@ int mingw_accept(int sockfd1, struct sockaddr *sa, socklen_t *sz)
SOCKET s1 = (SOCKET)_get_osfhandle(sockfd1);
SOCKET s2 = accept(s1, sa, sz);
+ if (s2 == INVALID_SOCKET) {
+ set_wsa_errno();
+ return -1;
+ }
+
/* convert into a file descriptor */
if ((sockfd2 = _open_osfhandle(s2, O_RDWR|O_BINARY)) < 0) {
int err = errno;
@@ -2445,14 +2937,14 @@ int mingw_accept(int sockfd1, struct sockaddr *sa, socklen_t *sz)
int mingw_rename(const char *pold, const char *pnew)
{
static int supports_file_rename_info_ex = 1;
- DWORD attrs = INVALID_FILE_ATTRIBUTES, gle;
+ DWORD attrs = INVALID_FILE_ATTRIBUTES, gle, attrsold;
int tries = 0;
- wchar_t wpold[MAX_PATH], wpnew[MAX_PATH];
+ wchar_t wpold[MAX_LONG_PATH], wpnew[MAX_LONG_PATH];
int wpnew_len;
- if (xutftowcs_path(wpold, pold) < 0)
+ if (xutftowcs_long_path(wpold, pold) < 0)
return -1;
- wpnew_len = xutftowcs_path(wpnew, pnew);
+ wpnew_len = xutftowcs_long_path(wpnew, pnew);
if (wpnew_len < 0)
return -1;
@@ -2482,9 +2974,9 @@ int mingw_rename(const char *pold, const char *pnew)
* flex array so that the structure has to be allocated on
* the heap. As we declare this structure ourselves though
* we can avoid the allocation and define FileName to have
- * MAX_PATH bytes.
+ * MAX_LONG_PATH characters.
*/
- WCHAR FileName[MAX_PATH];
+ WCHAR FileName[MAX_LONG_PATH];
} rename_info = { 0 };
HANDLE old_handle = INVALID_HANDLE_VALUE;
BOOL success;
@@ -2537,6 +3029,26 @@ int mingw_rename(const char *pold, const char *pnew)
gle = GetLastError();
}
+ if (gle == ERROR_ACCESS_DENIED) {
+ if (is_inside_windows_container()) {
+ /* Fall back to copy to destination & remove source */
+ if (CopyFileW(wpold, wpnew, FALSE) && !mingw_unlink(pold, 1))
+ return 0;
+ gle = GetLastError();
+ } else if ((attrsold = GetFileAttributesW(wpold)) & FILE_ATTRIBUTE_READONLY) {
+ /* if file is read-only, change and retry */
+ SetFileAttributesW(wpold, attrsold & ~FILE_ATTRIBUTE_READONLY);
+ if (MoveFileExW(wpold, wpnew,
+ MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED)) {
+ SetFileAttributesW(wpnew, attrsold);
+ return 0;
+ }
+ gle = GetLastError();
+ /* revert attribute change on failure */
+ SetFileAttributesW(wpold, attrsold);
+ }
+ }
+
/* revert file attributes on failure */
if (attrs != INVALID_FILE_ATTRIBUTES)
SetFileAttributesW(wpnew, attrs);
@@ -2838,9 +3350,9 @@ int mingw_raise(int sig)
int link(const char *oldpath, const char *newpath)
{
- wchar_t woldpath[MAX_PATH], wnewpath[MAX_PATH];
- if (xutftowcs_path(woldpath, oldpath) < 0 ||
- xutftowcs_path(wnewpath, newpath) < 0)
+ wchar_t woldpath[MAX_LONG_PATH], wnewpath[MAX_LONG_PATH];
+ if (xutftowcs_long_path(woldpath, oldpath) < 0 ||
+ xutftowcs_long_path(wnewpath, newpath) < 0)
return -1;
if (!CreateHardLinkW(wnewpath, woldpath, NULL)) {
@@ -2850,9 +3362,40 @@ int link(const char *oldpath, const char *newpath)
return 0;
}
-int symlink(const char *target, const char *link)
+enum symlink_type {
+ SYMLINK_TYPE_UNSPECIFIED = 0,
+ SYMLINK_TYPE_FILE,
+ SYMLINK_TYPE_DIRECTORY,
+};
+
+static enum symlink_type check_symlink_attr(struct index_state *index, const char *link)
{
- wchar_t wtarget[MAX_PATH], wlink[MAX_PATH];
+ static struct attr_check *check;
+ const char *value;
+
+ if (!index)
+ return SYMLINK_TYPE_UNSPECIFIED;
+
+ if (!check)
+ check = attr_check_initl("symlink", NULL);
+
+ git_check_attr(index, link, check);
+
+ value = check->items[0].value;
+ if (ATTR_UNSET(value))
+ return SYMLINK_TYPE_UNSPECIFIED;
+ if (!strcmp(value, "file"))
+ return SYMLINK_TYPE_FILE;
+ if (!strcmp(value, "dir") || !strcmp(value, "directory"))
+ return SYMLINK_TYPE_DIRECTORY;
+
+ warning(_("ignoring invalid symlink type '%s' for '%s'"), value, link);
+ return SYMLINK_TYPE_UNSPECIFIED;
+}
+
+int mingw_create_symlink(struct index_state *index, const char *target, const char *link)
+{
+ wchar_t wtarget[MAX_LONG_PATH], wlink[MAX_LONG_PATH];
int len;
/* fail if symlinks are disabled or API is not supported (WinXP) */
@@ -2861,8 +3404,8 @@ int symlink(const char *target, const char *link)
return -1;
}
- if ((len = xutftowcs_path(wtarget, target)) < 0
- || xutftowcs_path(wlink, link) < 0)
+ if ((len = xutftowcs_long_path(wtarget, target)) < 0
+ || xutftowcs_long_path(wlink, link) < 0)
return -1;
/* convert target dir separators to backslashes */
@@ -2870,58 +3413,41 @@ int symlink(const char *target, const char *link)
if (wtarget[len] == '/')
wtarget[len] = '\\';
- /* create file symlink */
- if (!CreateSymbolicLinkW(wlink, wtarget, symlink_file_flags)) {
- errno = err_win_to_posix(GetLastError());
- return -1;
- }
-
- /* convert to directory symlink if target exists */
- switch (process_phantom_symlink(wtarget, wlink)) {
- case PHANTOM_SYMLINK_RETRY: {
- /* if target doesn't exist, add to phantom symlinks list */
- wchar_t wfullpath[MAX_PATH];
- struct phantom_symlink_info *psi;
-
- /* convert to absolute path to be independent of cwd */
- len = GetFullPathNameW(wlink, MAX_PATH, wfullpath, NULL);
- if (!len || len >= MAX_PATH) {
- errno = err_win_to_posix(GetLastError());
- return -1;
- }
-
- /* over-allocate and fill phantom_symlink_info structure */
- psi = xmalloc(sizeof(struct phantom_symlink_info)
- + sizeof(wchar_t) * (len + wcslen(wtarget) + 2));
- psi->wlink = (wchar_t *)(psi + 1);
- wcscpy(psi->wlink, wfullpath);
- psi->wtarget = psi->wlink + len + 1;
- wcscpy(psi->wtarget, wtarget);
-
- EnterCriticalSection(&phantom_symlinks_cs);
- psi->next = phantom_symlinks;
- phantom_symlinks = psi;
- LeaveCriticalSection(&phantom_symlinks_cs);
- break;
- }
- case PHANTOM_SYMLINK_DIRECTORY:
- /* if we created a dir symlink, process other phantom symlinks */
+ switch (check_symlink_attr(index, link)) {
+ case SYMLINK_TYPE_UNSPECIFIED:
+ /* Create a phantom symlink: it is initially created as a file
+ * symlink, but may change to a directory symlink later if/when
+ * the target exists. */
+ return create_phantom_symlink(wtarget, wlink);
+ case SYMLINK_TYPE_FILE:
+ if (!CreateSymbolicLinkW(wlink, wtarget, symlink_file_flags))
+ break;
+ return 0;
+ case SYMLINK_TYPE_DIRECTORY:
+ if (!CreateSymbolicLinkW(wlink, wtarget,
+ symlink_directory_flags))
+ break;
+ /* There may be dangling phantom symlinks that point at this
+ * one, which should now morph into directory symlinks. */
process_phantom_symlinks();
- break;
+ return 0;
default:
- break;
+ BUG("unhandled symlink type");
}
- return 0;
+
+ /* CreateSymbolicLinkW failed. */
+ errno = err_win_to_posix(GetLastError());
+ return -1;
}
int readlink(const char *path, char *buf, size_t bufsiz)
{
- WCHAR wpath[MAX_PATH];
- char tmpbuf[MAX_PATH];
+ WCHAR wpath[MAX_LONG_PATH];
+ char tmpbuf[MAX_LONG_PATH];
int len;
DWORD tag;
- if (xutftowcs_path(wpath, path) < 0)
+ if (xutftowcs_long_path(wpath, path) < 0)
return -1;
if (read_reparse_point(wpath, TRUE, tmpbuf, &len, &tag) < 0)
@@ -2990,6 +3516,30 @@ pid_t waitpid(pid_t pid, int *status, int options)
return -1;
}
+int (*win32_is_mount_point)(struct strbuf *path) = mingw_is_mount_point;
+
+int mingw_is_mount_point(struct strbuf *path)
+{
+ WIN32_FIND_DATAW findbuf = { 0 };
+ HANDLE handle;
+ wchar_t wfilename[MAX_LONG_PATH];
+ int wlen = xutftowcs_long_path(wfilename, path->buf);
+ if (wlen < 0)
+ die(_("could not get long path for '%s'"), path->buf);
+
+ /* remove trailing slash, if any */
+ if (wlen > 0 && wfilename[wlen - 1] == L'/')
+ wfilename[--wlen] = L'\0';
+
+ handle = FindFirstFileW(wfilename, &findbuf);
+ if (handle == INVALID_HANDLE_VALUE)
+ return 0;
+ FindClose(handle);
+
+ return (findbuf.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) &&
+ (findbuf.dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT);
+}
+
int xutftowcsn(wchar_t *wcs, const char *utfs, size_t wcslen, int utflen)
{
int upos = 0, wpos = 0;
@@ -3075,6 +3625,57 @@ int xwcstoutf(char *utf, const wchar_t *wcs, size_t utflen)
return -1;
}
+#ifdef ENSURE_MSYSTEM_IS_SET
+#if !defined(RUNTIME_PREFIX) || !defined(HAVE_WPGMPTR) || !defined(MINGW_PREFIX)
+static size_t append_system_bin_dirs(char *path UNUSED, size_t size UNUSED)
+{
+ return 0;
+}
+#else
+static size_t append_system_bin_dirs(char *path, size_t size)
+{
+ char prefix[32768];
+ const char *slash;
+ size_t len = xwcstoutf(prefix, _wpgmptr, sizeof(prefix)), off = 0;
+
+ if (len == 0 || len >= sizeof(prefix) ||
+ !(slash = find_last_dir_sep(prefix)))
+ return 0;
+ /* strip trailing `git.exe` */
+ len = slash - prefix;
+
+ /* strip trailing `cmd` or `\bin` or `bin` or `libexec\git-core` */
+ if (strip_suffix_mem(prefix, &len, "\\" MINGW_PREFIX "\\libexec\\git-core") ||
+ strip_suffix_mem(prefix, &len, "\\" MINGW_PREFIX "\\bin"))
+ off += xsnprintf(path + off, size - off,
+ "%.*s\\" MINGW_PREFIX "\\bin;", (int)len, prefix);
+ else if (strip_suffix_mem(prefix, &len, "\\cmd") ||
+ strip_suffix_mem(prefix, &len, "\\bin") ||
+ strip_suffix_mem(prefix, &len, "\\libexec\\git-core"))
+ off += xsnprintf(path + off, size - off,
+ "%.*s\\" MINGW_PREFIX "\\bin;", (int)len, prefix);
+ else
+ return 0;
+
+ off += xsnprintf(path + off, size - off,
+ "%.*s\\usr\\bin;", (int)len, prefix);
+ return off;
+}
+#endif
+#endif
+
+static int is_system32_path(const char *path)
+{
+ WCHAR system32[MAX_LONG_PATH], wpath[MAX_LONG_PATH];
+
+ if (xutftowcs_long_path(wpath, path) < 0 ||
+ !GetSystemDirectoryW(system32, ARRAY_SIZE(system32)) ||
+ _wcsicmp(system32, wpath))
+ return 0;
+
+ return 1;
+}
+
static void setup_windows_environment(void)
{
char *tmp = getenv("TMPDIR");
@@ -3099,9 +3700,20 @@ static void setup_windows_environment(void)
convert_slashes(tmp);
}
- /* simulate TERM to enable auto-color (see color.c) */
- if (!getenv("TERM"))
- setenv("TERM", "cygwin", 1);
+
+ /*
+ * Make sure TERM is set up correctly to enable auto-color
+ * (see color.c). Use "cygwin" for older OS releases, where it
+ * works correctly with MSYS2 utilities on older consoles.
+ */
+ if (!getenv("TERM")) {
+ if ((GetVersion() >> 16) < 15063)
+ setenv("TERM", "cygwin", 0);
+ else {
+ setenv("TERM", "xterm-256color", 0);
+ setenv("COLORTERM", "truecolor", 0);
+ }
+ }
/* calculate HOME if not set */
if (!getenv("HOME")) {
@@ -3115,7 +3727,8 @@ static void setup_windows_environment(void)
strbuf_addstr(&buf, tmp);
if ((tmp = getenv("HOMEPATH"))) {
strbuf_addstr(&buf, tmp);
- if (is_directory(buf.buf))
+ if (!is_system32_path(buf.buf) &&
+ is_directory(buf.buf))
setenv("HOME", buf.buf, 1);
else
tmp = NULL; /* use $USERPROFILE */
@@ -3127,6 +3740,35 @@ static void setup_windows_environment(void)
setenv("HOME", tmp, 1);
}
+ if (!getenv("PLINK_PROTOCOL"))
+ setenv("PLINK_PROTOCOL", "ssh", 0);
+
+#ifdef ENSURE_MSYSTEM_IS_SET
+ if (!(tmp = getenv("MSYSTEM")) || !tmp[0]) {
+ const char *home = getenv("HOME"), *path = getenv("PATH");
+ char buf[32768];
+ size_t off = 0;
+
+ setenv("MSYSTEM", ENSURE_MSYSTEM_IS_SET, 1);
+
+ if (home)
+ off += xsnprintf(buf + off, sizeof(buf) - off,
+ "%s\\bin;", home);
+ off += append_system_bin_dirs(buf + off, sizeof(buf) - off);
+ if (path)
+ off += xsnprintf(buf + off, sizeof(buf) - off,
+ "%s", path);
+ else if (off > 0)
+ buf[off - 1] = '\0';
+ else
+ buf[0] = '\0';
+ setenv("PATH", buf, 1);
+ }
+#endif
+
+ if (!getenv("LC_ALL") && !getenv("LC_CTYPE") && !getenv("LANG"))
+ setenv("LC_CTYPE", "C.UTF-8", 1);
+
/*
* Change 'core.symlinks' default to false, unless native symlinks are
* enabled in MSys2 (via 'MSYS=winsymlinks:nativestrict'). Thus we can
@@ -3250,9 +3892,7 @@ int is_path_owned_by_current_sid(const char *path, struct strbuf *report)
DACL_SECURITY_INFORMATION,
&sid, NULL, NULL, NULL, &descriptor);
- if (err != ERROR_SUCCESS)
- error(_("failed to get owner for '%s' (%ld)"), path, err);
- else if (sid && IsValidSid(sid)) {
+ if (err == ERROR_SUCCESS && sid && IsValidSid(sid)) {
/* Now, verify that the SID matches the current user's */
static PSID current_user_sid;
static HANDLE linked_token;
@@ -3464,6 +4104,73 @@ int is_valid_win32_path(const char *path, int allow_literal_nul)
}
}
+int handle_long_path(wchar_t *path, int len, int max_path, int expand)
+{
+ int result;
+ wchar_t buf[MAX_LONG_PATH];
+
+ /*
+ * We don't need special handling if the path is relative to the current
+ * directory, and current directory + path don't exceed the desired
+ * max_path limit. This should cover > 99% of cases with minimal
+ * performance impact (git almost always uses relative paths).
+ */
+ if ((len < 2 || (!is_dir_sep(path[0]) && path[1] != ':')) &&
+ (current_directory_len + len < max_path))
+ return len;
+
+ /*
+ * handle everything else:
+ * - absolute paths: "C:\dir\file"
+ * - absolute UNC paths: "\\server\share\dir\file"
+ * - absolute paths on current drive: "\dir\file"
+ * - relative paths on other drive: "X:file"
+ * - prefixed paths: "\\?\...", "\\.\..."
+ */
+
+ /* convert to absolute path using GetFullPathNameW */
+ result = GetFullPathNameW(path, MAX_LONG_PATH, buf, NULL);
+ if (!result) {
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+ }
+
+ /*
+ * return absolute path if it fits within max_path (even if
+ * "cwd + path" doesn't due to '..' components)
+ */
+ if (result < max_path) {
+ /* Be careful not to add a drive prefix if there was none */
+ if (is_wdir_sep(path[0]) &&
+ !is_wdir_sep(buf[0]) && buf[1] == L':' && is_wdir_sep(buf[2]))
+ wcscpy(path, buf + 2);
+ else
+ wcscpy(path, buf);
+ return result;
+ }
+
+ /* error out if we shouldn't expand the path or buf is too small */
+ if (!expand || result >= MAX_LONG_PATH - 6) {
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+
+ /* prefix full path with "\\?\" or "\\?\UNC\" */
+ if (buf[0] == '\\') {
+ /* ...unless already prefixed */
+ if (buf[1] == '\\' && (buf[2] == '?' || buf[2] == '.'))
+ return len;
+
+ wcscpy(path, L"\\\\?\\UNC\\");
+ wcscpy(path + 8, buf + 2);
+ return result + 6;
+ } else {
+ wcscpy(path, L"\\\\?\\");
+ wcscpy(path + 4, buf);
+ return result + 4;
+ }
+}
+
#if !defined(_MSC_VER)
/*
* Disable MSVCRT command line wildcard expansion (__getmainargs called from
@@ -3563,7 +4270,14 @@ static void adjust_symlink_flags(void)
symlink_file_flags |= 2;
symlink_directory_flags |= 2;
}
+}
+static BOOL WINAPI handle_ctrl_c(DWORD ctrl_type)
+{
+ if (ctrl_type != CTRL_C_EVENT)
+ return FALSE; /* we did not handle this */
+ mingw_raise(SIGINT);
+ return TRUE; /* we did handle this */
}
#ifdef _MSC_VER
@@ -3600,8 +4314,11 @@ int wmain(int argc, const wchar_t **wargv)
#endif
#endif
+ SetConsoleCtrlHandler(handle_ctrl_c, TRUE);
+
maybe_redirect_std_handles();
adjust_symlink_flags();
+ fsync_object_files = 1;
/* determine size of argv and environ conversion buffer */
maxlen = wcslen(wargv[0]);
@@ -3633,6 +4350,9 @@ int wmain(int argc, const wchar_t **wargv)
InitializeCriticalSection(&pinfo_cs);
InitializeCriticalSection(&phantom_symlinks_cs);
+ /* initialize critical section for fscache */
+ InitializeCriticalSection(&fscache_cs);
+
/* set up default file mode and file modes for stdin/out/err */
_fmode = _O_BINARY;
_setmode(_fileno(stdin), _O_BINARY);
@@ -3642,6 +4362,9 @@ int wmain(int argc, const wchar_t **wargv)
/* initialize Unicode console */
winansi_init();
+ /* init length of current directory for handle_long_path */
+ current_directory_len = GetCurrentDirectoryW(0, NULL);
+
/* invoke the real main() using our utf8 version of argv. */
exit_status = main(argc, argv);
@@ -3686,3 +4409,62 @@ int mingw_have_unix_sockets(void)
return ret;
}
#endif
+
+/*
+ * Based on https://stackoverflow.com/questions/43002803
+ *
+ * [HKLM\SYSTEM\CurrentControlSet\Services\cexecsvc]
+ * "DisplayName"="@%systemroot%\\system32\\cexecsvc.exe,-100"
+ * "ErrorControl"=dword:00000001
+ * "ImagePath"=hex(2):25,00,73,00,79,00,73,00,74,00,65,00,6d,00,72,00,6f,00,
+ * 6f,00,74,00,25,00,5c,00,73,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,
+ * 5c,00,63,00,65,00,78,00,65,00,63,00,73,00,76,00,63,00,2e,00,65,00,78,00,
+ * 65,00,00,00
+ * "Start"=dword:00000002
+ * "Type"=dword:00000010
+ * "Description"="@%systemroot%\\system32\\cexecsvc.exe,-101"
+ * "ObjectName"="LocalSystem"
+ * "ServiceSidType"=dword:00000001
+ */
+int is_inside_windows_container(void)
+{
+ static int inside_container = -1; /* -1 uninitialized */
+ const char *key = "SYSTEM\\CurrentControlSet\\Services\\cexecsvc";
+ HKEY handle = NULL;
+
+ if (inside_container != -1)
+ return inside_container;
+
+ inside_container = ERROR_SUCCESS ==
+ RegOpenKeyExA(HKEY_LOCAL_MACHINE, key, 0, KEY_READ, &handle);
+ RegCloseKey(handle);
+
+ return inside_container;
+}
+
+int file_attr_to_st_mode (DWORD attr, DWORD tag, const char *path)
+{
+ int fMode = S_IREAD;
+ if ((attr & FILE_ATTRIBUTE_REPARSE_POINT) &&
+ tag == IO_REPARSE_TAG_SYMLINK) {
+ int flag = S_IFLNK;
+ char buf[MAX_LONG_PATH];
+
+ /*
+ * Windows containers' mapped volumes are marked as reparse
+ * points and look like symbolic links, but they are not.
+ */
+ if (path && is_inside_windows_container() &&
+ readlink(path, buf, sizeof(buf)) > 27 &&
+ starts_with(buf, "/ContainerMappedDirectories/"))
+ flag = S_IFDIR;
+
+ fMode |= flag;
+ } else if (attr & FILE_ATTRIBUTE_DIRECTORY)
+ fMode |= S_IFDIR;
+ else
+ fMode |= S_IFREG;
+ if (!(attr & FILE_ATTRIBUTE_READONLY))
+ fMode |= S_IWRITE;
+ return fMode;
+}
diff --git a/compat/mingw.h b/compat/mingw.h
index 444daedfa52469..807ee7b7e2e573 100644
--- a/compat/mingw.h
+++ b/compat/mingw.h
@@ -1,5 +1,8 @@
#include "mingw-posix.h"
+extern int core_fscache;
+int are_long_paths_enabled(void);
+
struct config_context;
int mingw_core_config(const char *var, const char *value,
const struct config_context *ctx, void *cb);
@@ -36,9 +39,17 @@ static inline void convert_slashes(char *path)
if (*path == '\\')
*path = '/';
}
+struct strbuf;
+int mingw_is_mount_point(struct strbuf *path);
+extern int (*win32_is_mount_point)(struct strbuf *path);
+#define is_mount_point win32_is_mount_point
+#define CAN_UNLINK_MOUNT_POINTS 1
#define PATH_SEP ';'
char *mingw_query_user_email(void);
#define query_user_email mingw_query_user_email
+struct strbuf;
+char *mingw_strbuf_realpath(struct strbuf *resolved, const char *path);
+#define platform_strbuf_realpath mingw_strbuf_realpath
/**
* Verifies that the specified path is owned by the user running the
@@ -68,6 +79,42 @@ int is_path_owned_by_current_sid(const char *path, struct strbuf *report);
int is_valid_win32_path(const char *path, int allow_literal_nul);
#define is_valid_path(path) is_valid_win32_path(path, 0)
+/**
+ * Max length of long paths (exceeding MAX_PATH). The actual maximum supported
+ * by NTFS is 32,767 (* sizeof(wchar_t)), but we choose an arbitrary smaller
+ * value to limit required stack memory.
+ */
+#define MAX_LONG_PATH 4096
+
+/**
+ * Handles paths that would exceed the MAX_PATH limit of Windows Unicode APIs.
+ *
+ * With expand == false, the function checks for over-long paths and fails
+ * with ENAMETOOLONG. The path parameter is not modified, except if cwd + path
+ * exceeds max_path, but the resulting absolute path doesn't (e.g. due to
+ * eliminating '..' components). The path parameter must point to a buffer
+ * of max_path wide characters.
+ *
+ * With expand == true, an over-long path is automatically converted in place
+ * to an absolute path prefixed with '\\?\', and the new length is returned.
+ * The path parameter must point to a buffer of MAX_LONG_PATH wide characters.
+ *
+ * Parameters:
+ * path: path to check and / or convert
+ * len: size of path on input (number of wide chars without \0)
+ * max_path: max short path length to check (usually MAX_PATH = 260, but just
+ * 248 for CreateDirectoryW)
+ * expand: false to only check the length, true to expand the path to a
+ * '\\?\'-prefixed absolute path
+ *
+ * Return:
+ * length of the resulting path, or -1 on failure
+ *
+ * Errors:
+ * ENAMETOOLONG if path is too long
+ */
+int handle_long_path(wchar_t *path, int len, int max_path, int expand);
+
/**
* Converts UTF-8 encoded string to UTF-16LE.
*
@@ -126,18 +173,46 @@ static inline int xutftowcs(wchar_t *wcs, const char *utf, size_t wcslen)
}
/**
- * Simplified file system specific variant of xutftowcsn, assumes output
- * buffer size is MAX_PATH wide chars and input string is \0-terminated,
- * fails with ENAMETOOLONG if input string is too long.
+ * Simplified file system specific wrapper of xutftowcsn and handle_long_path.
+ * Converts ERANGE to ENAMETOOLONG. If expand is true, wcs must be at least
+ * MAX_LONG_PATH wide chars (see handle_long_path).
*/
-static inline int xutftowcs_path(wchar_t *wcs, const char *utf)
+static inline int xutftowcs_path_ex(wchar_t *wcs, const char *utf,
+ size_t wcslen, int utflen, int max_path, int expand)
{
- int result = xutftowcsn(wcs, utf, MAX_PATH, -1);
+ int result = xutftowcsn(wcs, utf, wcslen, utflen);
if (result < 0 && errno == ERANGE)
errno = ENAMETOOLONG;
+ if (result >= 0)
+ result = handle_long_path(wcs, result, max_path, expand);
return result;
}
+/**
+ * Simplified file system specific variant of xutftowcsn, assumes output
+ * buffer size is MAX_PATH wide chars and input string is \0-terminated,
+ * fails with ENAMETOOLONG if input string is too long. Typically used for
+ * Windows APIs that don't support long paths, e.g. SetCurrentDirectory,
+ * LoadLibrary, CreateProcess...
+ */
+static inline int xutftowcs_path(wchar_t *wcs, const char *utf)
+{
+ return xutftowcs_path_ex(wcs, utf, MAX_PATH, -1, MAX_PATH, 0);
+}
+
+/**
+ * Simplified file system specific variant of xutftowcsn for Windows APIs
+ * that support long paths via '\\?\'-prefix, assumes output buffer size is
+ * MAX_LONG_PATH wide chars, fails with ENAMETOOLONG if input string is too
+ * long. The 'core.longpaths' git-config option controls whether the path
+ * is only checked or expanded to a long path.
+ */
+static inline int xutftowcs_long_path(wchar_t *wcs, const char *utf)
+{
+ return xutftowcs_path_ex(wcs, utf, MAX_LONG_PATH, -1, MAX_PATH,
+ are_long_paths_enabled());
+}
+
/**
* Converts UTF-16LE encoded string to UTF-8.
*
@@ -213,3 +288,8 @@ int mingw_have_unix_sockets(void);
#undef have_unix_sockets
#define have_unix_sockets mingw_have_unix_sockets
#endif
+
+/*
+ * Check current process is inside Windows Container.
+ */
+int is_inside_windows_container(void);
diff --git a/compat/posix.h b/compat/posix.h
index 3c611d2736c47a..1ea8daea7c8ed7 100644
--- a/compat/posix.h
+++ b/compat/posix.h
@@ -45,7 +45,7 @@
#define UNUSED
#endif
-#ifdef __MINGW64__
+#if defined(__MINGW32__) || defined(__MINGW64__)
#define _POSIX_C_SOURCE 1
#elif defined(__sun__)
/*
@@ -70,7 +70,9 @@
#define _ALL_SOURCE 1
#define _GNU_SOURCE 1
#define _BSD_SOURCE 1
+#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE 1
+#endif
#define _NETBSD_SOURCE 1
#define _SGI_SOURCE 1
@@ -177,6 +179,16 @@ typedef unsigned long uintptr_t;
#define _ALL_SOURCE 1
#endif
+#ifdef USE_MIMALLOC
+#include "mimalloc.h"
+#define malloc mi_malloc
+#define calloc mi_calloc
+#define realloc mi_realloc
+#define free mi_free
+#define strdup mi_strdup
+#define strndup mi_strndup
+#endif
+
#ifdef MKDIR_WO_TRAILING_SLASH
#define mkdir(a,b) compat_mkdir_wo_trailing_slash((a),(b))
int compat_mkdir_wo_trailing_slash(const char*, mode_t);
diff --git a/compat/terminal.c b/compat/terminal.c
index 584f27bf7e1078..882b027e41e52b 100644
--- a/compat/terminal.c
+++ b/compat/terminal.c
@@ -418,6 +418,55 @@ static int getchar_with_timeout(int timeout)
return getchar();
}
+static char *shell_prompt(const char *prompt, int echo)
+{
+ const char *read_input[] = {
+ /* Note: call 'bash' explicitly, as 'read -s' is bash-specific */
+ "bash", "-c", echo ?
+ "cat >/dev/tty && read -r line </dev/tty && echo \"$line\"" :
+ "cat >/dev/tty && read -r -s line </dev/tty && echo \"$line\" && echo >/dev/tty",
+ NULL
+ };
+ struct child_process child = CHILD_PROCESS_INIT;
+ static struct strbuf buffer = STRBUF_INIT;
+ int prompt_len = strlen(prompt), len = -1, code;
+
+ strvec_pushv(&child.args, read_input);
+ child.in = -1;
+ child.out = -1;
+ child.silent_exec_failure = 1;
+
+ if (start_command(&child))
+ return NULL;
+
+ if (write_in_full(child.in, prompt, prompt_len) != prompt_len) {
+ error("could not write to prompt script");
+ close(child.in);
+ goto ret;
+ }
+ close(child.in);
+
+ strbuf_reset(&buffer);
+ len = strbuf_read(&buffer, child.out, 1024);
+ if (len < 0) {
+ error("could not read from prompt script");
+ goto ret;
+ }
+
+ strbuf_strip_suffix(&buffer, "\n");
+ strbuf_strip_suffix(&buffer, "\r");
+
+ret:
+ close(child.out);
+ code = finish_command(&child);
+ if (code) {
+ error("failed to execute prompt script (exit code %d)", code);
+ return NULL;
+ }
+
+ return len < 0 ? NULL : buffer.buf;
+}
+
#endif
#ifndef FORCE_TEXT
@@ -430,6 +479,15 @@ char *git_terminal_prompt(const char *prompt, int echo)
int r;
FILE *input_fh, *output_fh;
+#ifdef GIT_WINDOWS_NATIVE
+
+ /* try shell_prompt first, fall back to CONIN/OUT if bash is missing */
+ char *result = shell_prompt(prompt, echo);
+ if (result)
+ return result;
+
+#endif
+
input_fh = fopen(INPUT_PATH, "r" FORCE_TEXT);
if (!input_fh)
return NULL;
diff --git a/compat/vcbuild/README b/compat/vcbuild/README
index 29ec1d0f104b80..9ac9760397f479 100644
--- a/compat/vcbuild/README
+++ b/compat/vcbuild/README
@@ -6,7 +6,11 @@ The Steps to Build Git with VS2015 or VS2017 from the command line.
Prompt or from an SDK bash window:
$ cd
- $ ./compat/vcbuild/vcpkg_install.bat
+ $ ./compat/vcbuild/vcpkg_install.bat x64-windows
+
+ or
+
+ $ ./compat/vcbuild/vcpkg_install.bat arm64-windows
The vcpkg tools and all of the third-party sources will be installed
in this folder:
@@ -37,27 +41,17 @@ The Steps to Build Git with VS2015 or VS2017 from the command line.
================================================================
-Alternatively, run `make vcxproj` and then load the generated `git.sln` in
-Visual Studio. The initial build will install the vcpkg system and build the
+Alternatively, just open Git's top-level directory in Visual Studio, via
+`File>Open>Folder...`. This will use CMake internally to generate the
+project definitions. It will also install the vcpkg system and build the
dependencies automatically. This will take a while.
-Instead of generating the `git.sln` file yourself (which requires a full Git
-for Windows SDK), you may want to consider fetching the `vs/master` branch of
-https://github.com/git-for-windows/git instead (which is updated automatically
-via CI running `make vcxproj`). The `vs/master` branch does not require a Git
-for Windows to build, but you can run the test scripts in a regular Git Bash.
-
-Note that `make vcxproj` will automatically add and commit the generated `.sln`
-and `.vcxproj` files to the repo. This is necessary to allow building a
-fully-testable Git in Visual Studio, where a regular Git Bash can be used to
-run the test scripts (as opposed to a full Git for Windows SDK): a number of
-build targets, such as Git commands implemented as Unix shell scripts (where
-`@@SHELL_PATH@@` and other placeholders are interpolated) require a full-blown
-Git for Windows SDK (which is about 10x the size of a regular Git for Windows
-installation).
-
-If your plan is to open a Pull Request with Git for Windows, it is a good idea
-to drop this commit before submitting.
+You can also generate the Visual Studio solution manually by downloading
+and running CMake explicitly rather than letting Visual Studio doing
+that implicitly.
+
+Another, deprecated option is to run `make vcxproj`. This option is
+superseded by the CMake-based build, and will be removed at some point.
================================================================
The Steps of Build Git with VS2008
diff --git a/compat/vcbuild/find_vs_env.bat b/compat/vcbuild/find_vs_env.bat
index b35d264c0e6bed..379b16296e09c2 100644
--- a/compat/vcbuild/find_vs_env.bat
+++ b/compat/vcbuild/find_vs_env.bat
@@ -99,6 +99,7 @@ REM ================================================================
SET sdk_dir=%WindowsSdkDir%
SET sdk_ver=%WindowsSDKVersion%
+ SET sdk_ver_bin_dir=%WindowsSdkVerBinPath%%tgt%
SET si=%sdk_dir%Include\%sdk_ver%
SET sdk_includes=-I"%si%ucrt" -I"%si%um" -I"%si%shared"
SET sl=%sdk_dir%lib\%sdk_ver%
@@ -130,6 +131,7 @@ REM ================================================================
SET sdk_dir=%WindowsSdkDir%
SET sdk_ver=%WindowsSDKVersion%
+ SET sdk_ver_bin_dir=%WindowsSdkVerBinPath%bin\amd64
SET si=%sdk_dir%Include\%sdk_ver%
SET sdk_includes=-I"%si%ucrt" -I"%si%um" -I"%si%shared" -I"%si%winrt"
SET sl=%sdk_dir%lib\%sdk_ver%
@@ -160,6 +162,11 @@ REM ================================================================
echo msvc_includes=%msvc_includes%
echo msvc_libs=%msvc_libs%
+ echo sdk_ver_bin_dir=%sdk_ver_bin_dir%
+ SET X1=%sdk_ver_bin_dir:C:=/C%
+ SET X2=%X1:\=/%
+ echo sdk_ver_bin_dir_msys=%X2%
+
echo sdk_includes=%sdk_includes%
echo sdk_libs=%sdk_libs%
diff --git a/compat/vcbuild/scripts/clink.pl b/compat/vcbuild/scripts/clink.pl
index 3bd824154be381..677d44e46f98d6 100755
--- a/compat/vcbuild/scripts/clink.pl
+++ b/compat/vcbuild/scripts/clink.pl
@@ -15,6 +15,7 @@
my @lflags = ();
my $is_linking = 0;
my $is_debug = 0;
+my $is_gui = 0;
while (@ARGV) {
my $arg = shift @ARGV;
if ("$arg" eq "-DDEBUG") {
@@ -56,7 +57,8 @@
# need to use that instead?
foreach my $flag (@lflags) {
if ($flag =~ /^-LIBPATH:(.*)/) {
- foreach my $l ("libcurl_imp.lib", "libcurl.lib") {
+ my $libcurl = $is_debug ? "libcurl-d.lib" : "libcurl.lib";
+ foreach my $l ("libcurl_imp.lib", $libcurl) {
if (-f "$1/$l") {
$lib = $l;
last;
@@ -66,7 +68,11 @@
}
push(@args, $lib);
} elsif ("$arg" eq "-lexpat") {
+ if ($is_debug) {
+ push(@args, "libexpatd.lib");
+ } else {
push(@args, "libexpat.lib");
+ }
} elsif ("$arg" =~ /^-L/ && "$arg" ne "-LTCG") {
$arg =~ s/^-L/-LIBPATH:/;
push(@lflags, $arg);
@@ -118,11 +124,23 @@
push(@cflags, "-wd4996");
} elsif ("$arg" =~ /^-W[a-z]/) {
# let's ignore those
+ } elsif ("$arg" eq "-fno-stack-protector") {
+ # eat this
+ } elsif ("$arg" eq "-mwindows") {
+ $is_gui = 1;
} else {
push(@args, $arg);
}
}
if ($is_linking) {
+ if ($is_gui) {
+ push(@args, "-ENTRY:wWinMainCRTStartup");
+ push(@args, "-SUBSYSTEM:WINDOWS");
+ } else {
+ push(@args, "-ENTRY:wmainCRTStartup");
+ push(@args, "-SUBSYSTEM:CONSOLE");
+ }
+
push(@args, @lflags);
unshift(@args, "link.exe");
} else {
diff --git a/compat/vcbuild/scripts/rc.pl b/compat/vcbuild/scripts/rc.pl
new file mode 100644
index 00000000000000..7bca4cd81c6c63
--- /dev/null
+++ b/compat/vcbuild/scripts/rc.pl
@@ -0,0 +1,46 @@
+#!/usr/bin/perl -w
+######################################################################
+# Compile Resources on Windows
+#
+# This is a wrapper to facilitate the compilation of Git with MSVC
+# using GNU Make as the build system. So, instead of manipulating the
+# Makefile into something nasty, just to support non-space arguments
+# etc, we use this wrapper to fix the command line options
+#
+######################################################################
+use strict;
+my @args = ();
+my @input = ();
+
+while (@ARGV) {
+ my $arg = shift @ARGV;
+ if ("$arg" =~ /^-[dD]/) {
+ # GIT_VERSION gets passed with too many
+ # layers of dquote escaping.
+ $arg =~ s/\\"/"/g;
+
+ push(@args, $arg);
+
+ } elsif ("$arg" eq "-i") {
+ my $arg = shift @ARGV;
+ # TODO complain if NULL or is dashed ??
+ push(@input, $arg);
+
+ } elsif ("$arg" eq "-o") {
+ my $arg = shift @ARGV;
+ # TODO complain if NULL or is dashed ??
+ push(@args, "-fo$arg");
+
+ } else {
+ push(@args, $arg);
+ }
+}
+
+push(@args, "-nologo");
+push(@args, "-v");
+push(@args, @input);
+
+unshift(@args, "rc.exe");
+printf("**** @args\n");
+
+exit (system(@args) != 0);
diff --git a/compat/vcbuild/vcpkg_copy_dlls.bat b/compat/vcbuild/vcpkg_copy_dlls.bat
index 13661c14f8705c..8bea0cbf83b6cf 100644
--- a/compat/vcbuild/vcpkg_copy_dlls.bat
+++ b/compat/vcbuild/vcpkg_copy_dlls.bat
@@ -15,7 +15,12 @@ REM ================================================================
@FOR /F "delims=" %%D IN ("%~dp0") DO @SET cwd=%%~fD
cd %cwd%
- SET arch=x64-windows
+ SET arch=%2
+ IF NOT DEFINED arch (
+ echo defaulting to 'x64-windows'. Invoke %0 with 'x86-windows', 'x64-windows', or 'arm64-windows'
+ set arch=x64-windows
+ )
+
SET inst=%cwd%vcpkg\installed\%arch%
IF [%1]==[release] (
diff --git a/compat/vcbuild/vcpkg_install.bat b/compat/vcbuild/vcpkg_install.bat
index ebd0bad242a8ca..575c65c20ba307 100644
--- a/compat/vcbuild/vcpkg_install.bat
+++ b/compat/vcbuild/vcpkg_install.bat
@@ -31,11 +31,24 @@ REM ================================================================
SETLOCAL EnableDelayedExpansion
+ SET arch=%1
+ IF NOT DEFINED arch (
+ echo defaulting to 'x64-windows'. Invoke %0 with 'x86-windows', 'x64-windows', or 'arm64-windows'
+ set arch=x64-windows
+ )
+
@FOR /F "delims=" %%D IN ("%~dp0") DO @SET cwd=%%~fD
cd %cwd%
dir vcpkg\vcpkg.exe >nul 2>nul && GOTO :install_libraries
+ git.exe version 2>nul
+ IF ERRORLEVEL 1 (
+ echo "***"
+ echo "Git not found. Please adjust your CMD path or Git install option."
+ echo "***"
+ EXIT /B 1 )
+
echo Fetching vcpkg in %cwd%vcpkg
git.exe clone https://github.com/Microsoft/vcpkg vcpkg
IF ERRORLEVEL 1 ( EXIT /B 1 )
@@ -48,9 +61,8 @@ REM ================================================================
echo Successfully installed %cwd%vcpkg\vcpkg.exe
:install_libraries
- SET arch=x64-windows
- echo Installing third-party libraries...
+ echo Installing third-party libraries(%arch%)...
FOR %%i IN (zlib expat libiconv openssl libssh2 curl) DO (
cd %cwd%vcpkg
IF NOT EXIST "packages\%%i_%arch%" CALL :sub__install_one %%i
@@ -73,8 +85,47 @@ REM ================================================================
:sub__install_one
echo Installing package %1...
- .\vcpkg.exe install %1:%arch%
+ call :%1_features
+
+ REM vcpkg may not be reliable on slow, intermittent or proxy
+ REM connections, see e.g.
+ REM https://social.msdn.microsoft.com/Forums/windowsdesktop/en-US/4a8f7be5-5e15-4213-a7bb-ddf424a954e6/winhttpsendrequest-ends-with-12002-errorhttptimeout-after-21-seconds-no-matter-what-timeout?forum=windowssdk
+ REM which explains the hidden 21 second timeout
+ REM (last post by Dave : Microsoft - Windows Networking team)
+
+ .\vcpkg.exe install %1%features%:%arch%
IF ERRORLEVEL 1 ( EXIT /B 1 )
echo Finished %1
goto :EOF
+
+::
+:: features for each vcpkg to install
+:: there should be an entry here for each package to install
+:: 'set features=' means use the default otherwise
+:: 'set features=[comma-delimited-feature-set]' is the syntax
+::
+
+:zlib_features
+set features=
+goto :EOF
+
+:expat_features
+set features=
+goto :EOF
+
+:libiconv_features
+set features=
+goto :EOF
+
+:openssl_features
+set features=
+goto :EOF
+
+:libssh2_features
+set features=
+goto :EOF
+
+:curl_features
+set features=[core,openssl,schannel]
+goto :EOF
diff --git a/compat/win32.h b/compat/win32.h
index 671bcc81f93351..299f01bdf0f5a4 100644
--- a/compat/win32.h
+++ b/compat/win32.h
@@ -6,19 +6,7 @@
#include <windows.h>
#endif
-static inline int file_attr_to_st_mode (DWORD attr, DWORD tag)
-{
- int fMode = S_IREAD;
- if ((attr & FILE_ATTRIBUTE_REPARSE_POINT) && tag == IO_REPARSE_TAG_SYMLINK)
- fMode |= S_IFLNK;
- else if (attr & FILE_ATTRIBUTE_DIRECTORY)
- fMode |= S_IFDIR;
- else
- fMode |= S_IFREG;
- if (!(attr & FILE_ATTRIBUTE_READONLY))
- fMode |= S_IWRITE;
- return fMode;
-}
+extern int file_attr_to_st_mode (DWORD attr, DWORD tag, const char *path);
static inline int get_file_attr(const char *fname, WIN32_FILE_ATTRIBUTE_DATA *fdata)
{
diff --git a/compat/win32/dirent.c b/compat/win32/dirent.c
index 24ee9b814d6adf..87063101f57202 100644
--- a/compat/win32/dirent.c
+++ b/compat/win32/dirent.c
@@ -1,15 +1,21 @@
#include "../../git-compat-util.h"
-struct DIR {
- struct dirent dd_dir; /* includes d_type */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+typedef struct dirent_DIR {
+ struct DIR base_dir; /* extend base struct DIR */
HANDLE dd_handle; /* FindFirstFile handle */
int dd_stat; /* 0-based index */
-};
+ struct dirent dd_dir; /* includes d_type */
+} dirent_DIR;
+#pragma GCC diagnostic pop
+
+DIR *(*opendir)(const char *dirname) = dirent_opendir;
static inline void finddata2dirent(struct dirent *ent, WIN32_FIND_DATAW *fdata)
{
- /* convert UTF-16 name to UTF-8 */
- xwcstoutf(ent->d_name, fdata->cFileName, sizeof(ent->d_name));
+ /* convert UTF-16 name to UTF-8 (d_name points to dirent_DIR.dd_name) */
+ xwcstoutf(ent->d_name, fdata->cFileName, MAX_PATH * 3);
/* Set file type, based on WIN32_FIND_DATA */
if ((fdata->dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)
@@ -21,41 +27,7 @@ static inline void finddata2dirent(struct dirent *ent, WIN32_FIND_DATAW *fdata)
ent->d_type = DT_REG;
}
-DIR *opendir(const char *name)
-{
- wchar_t pattern[MAX_PATH + 2]; /* + 2 for '/' '*' */
- WIN32_FIND_DATAW fdata;
- HANDLE h;
- int len;
- DIR *dir;
-
- /* convert name to UTF-16 and check length < MAX_PATH */
- if ((len = xutftowcs_path(pattern, name)) < 0)
- return NULL;
-
- /* append optional '/' and wildcard '*' */
- if (len && !is_dir_sep(pattern[len - 1]))
- pattern[len++] = '/';
- pattern[len++] = '*';
- pattern[len] = 0;
-
- /* open find handle */
- h = FindFirstFileW(pattern, &fdata);
- if (h == INVALID_HANDLE_VALUE) {
- DWORD err = GetLastError();
- errno = (err == ERROR_DIRECTORY) ? ENOTDIR : err_win_to_posix(err);
- return NULL;
- }
-
- /* initialize DIR structure and copy first dir entry */
- dir = xmalloc(sizeof(DIR));
- dir->dd_handle = h;
- dir->dd_stat = 0;
- finddata2dirent(&dir->dd_dir, &fdata);
- return dir;
-}
-
-struct dirent *readdir(DIR *dir)
+static struct dirent *dirent_readdir(dirent_DIR *dir)
{
if (!dir) {
errno = EBADF; /* No set_errno for mingw */
@@ -82,7 +54,7 @@ struct dirent *readdir(DIR *dir)
return &dir->dd_dir;
}
-int closedir(DIR *dir)
+static int dirent_closedir(dirent_DIR *dir)
{
if (!dir) {
errno = EBADF;
@@ -93,3 +65,44 @@ int closedir(DIR *dir)
free(dir);
return 0;
}
+
+DIR *dirent_opendir(const char *name)
+{
+ wchar_t pattern[MAX_LONG_PATH + 2]; /* + 2 for "\*" */
+ WIN32_FIND_DATAW fdata;
+ HANDLE h;
+ int len;
+ dirent_DIR *dir;
+
+ /* convert name to UTF-16 and check length */
+ if ((len = xutftowcs_path_ex(pattern, name, MAX_LONG_PATH, -1,
+ MAX_PATH - 2,
+ are_long_paths_enabled())) < 0)
+ return NULL;
+
+ /*
+ * append optional '\' and wildcard '*'. Note: we need to use '\' as
+ * Windows doesn't translate '/' to '\' for "\\?\"-prefixed paths.
+ */
+ if (len && !is_dir_sep(pattern[len - 1]))
+ pattern[len++] = '\\';
+ pattern[len++] = '*';
+ pattern[len] = 0;
+
+ /* open find handle */
+ h = FindFirstFileW(pattern, &fdata);
+ if (h == INVALID_HANDLE_VALUE) {
+ DWORD err = GetLastError();
+ errno = (err == ERROR_DIRECTORY) ? ENOTDIR : err_win_to_posix(err);
+ return NULL;
+ }
+
+ /* initialize DIR structure and copy first dir entry */
+ dir = xmalloc(sizeof(dirent_DIR) + MAX_LONG_PATH);
+ dir->base_dir.preaddir = (struct dirent *(*)(DIR *dir)) dirent_readdir;
+ dir->base_dir.pclosedir = (int (*)(DIR *dir)) dirent_closedir;
+ dir->dd_handle = h;
+ dir->dd_stat = 0;
+ finddata2dirent(&dir->dd_dir, &fdata);
+ return (DIR*) dir;
+}
diff --git a/compat/win32/dirent.h b/compat/win32/dirent.h
index 058207e4bfed62..a58a8075fd70e3 100644
--- a/compat/win32/dirent.h
+++ b/compat/win32/dirent.h
@@ -1,20 +1,34 @@
#ifndef DIRENT_H
#define DIRENT_H
-typedef struct DIR DIR;
-
#define DT_UNKNOWN 0
#define DT_DIR 1
#define DT_REG 2
#define DT_LNK 3
struct dirent {
- unsigned char d_type; /* file type to prevent lstat after readdir */
- char d_name[MAX_PATH * 3]; /* file name (* 3 for UTF-8 conversion) */
+ unsigned char d_type; /* file type to prevent lstat after readdir */
+ char d_name[/* FLEX_ARRAY */]; /* file name */
};
-DIR *opendir(const char *dirname);
-struct dirent *readdir(DIR *dir);
-int closedir(DIR *dir);
+/*
+ * Base DIR structure, contains pointers to readdir/closedir implementations so
+ * that opendir may choose a concrete implementation on a call-by-call basis.
+ */
+typedef struct DIR {
+ struct dirent *(*preaddir)(struct DIR *dir);
+ int (*pclosedir)(struct DIR *dir);
+} DIR;
+
+/* default dirent implementation */
+extern DIR *dirent_opendir(const char *dirname);
+
+#define opendir git_opendir
+
+/* current dirent implementation */
+extern DIR *(*opendir)(const char *dirname);
+
+#define readdir(dir) (dir->preaddir(dir))
+#define closedir(dir) (dir->pclosedir(dir))
#endif /* DIRENT_H */
diff --git a/compat/win32/exit-process.h b/compat/win32/exit-process.h
new file mode 100644
index 00000000000000..d53989884cfb0c
--- /dev/null
+++ b/compat/win32/exit-process.h
@@ -0,0 +1,165 @@
+#ifndef EXIT_PROCESS_H
+#define EXIT_PROCESS_H
+
+/*
+ * This file contains functions to terminate a Win32 process, as gently as
+ * possible.
+ *
+ * At first, we will attempt to inject a thread that calls ExitProcess(). If
+ * that fails, we will fall back to terminating the entire process tree.
+ *
+ * For simplicity, these functions are marked as file-local.
+ */
+
+#include <tlhelp32.h>
+
+/*
+ * Terminates the process corresponding to the process ID and all of its
+ * directly and indirectly spawned subprocesses.
+ *
+ * This way of terminating the processes is not gentle: the processes get
+ * no chance of cleaning up after themselves (closing file handles, removing
+ * .lock files, terminating spawned processes (if any), etc).
+ */
+static int terminate_process_tree(HANDLE main_process, int exit_status)
+{
+ HANDLE snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+ PROCESSENTRY32 entry;
+ DWORD pids[16384];
+ int max_len = sizeof(pids) / sizeof(*pids), i, len, ret = 0;
+ pid_t pid = GetProcessId(main_process);
+
+ pids[0] = (DWORD)pid;
+ len = 1;
+
+ /*
+ * Even if Process32First()/Process32Next() seem to traverse the
+ * processes in topological order (i.e. parent processes before
+ * child processes), there is nothing in the Win32 API documentation
+ * suggesting that this is guaranteed.
+ *
+ * Therefore, run through them at least twice and stop when no more
+ * process IDs were added to the list.
+ */
+ for (;;) {
+ int orig_len = len;
+
+ memset(&entry, 0, sizeof(entry));
+ entry.dwSize = sizeof(entry);
+
+ if (!Process32First(snapshot, &entry))
+ break;
+
+ do {
+ for (i = len - 1; i >= 0; i--) {
+ if (pids[i] == entry.th32ProcessID)
+ break;
+ if (pids[i] == entry.th32ParentProcessID)
+ pids[len++] = entry.th32ProcessID;
+ }
+ } while (len < max_len && Process32Next(snapshot, &entry));
+
+ if (orig_len == len || len >= max_len)
+ break;
+ }
+
+ for (i = len - 1; i > 0; i--) {
+ HANDLE process = OpenProcess(PROCESS_TERMINATE, FALSE, pids[i]);
+
+ if (process) {
+ if (!TerminateProcess(process, exit_status))
+ ret = -1;
+ CloseHandle(process);
+ }
+ }
+ if (!TerminateProcess(main_process, exit_status))
+ ret = -1;
+ CloseHandle(main_process);
+
+ return ret;
+}
+
+/**
+ * Determine whether a process runs in the same architecture as the current
+ * one. That test is required before we assume that GetProcAddress() returns
+ * a valid address *for the target process*.
+ */
+static inline int process_architecture_matches_current(HANDLE process)
+{
+ static BOOL current_is_wow = -1;
+ BOOL is_wow;
+
+ if (current_is_wow == -1 &&
+ !IsWow64Process (GetCurrentProcess(), &current_is_wow))
+ current_is_wow = -2;
+ if (current_is_wow == -2)
+ return 0; /* could not determine current process' WoW-ness */
+ if (!IsWow64Process (process, &is_wow))
+ return 0; /* cannot determine */
+ return is_wow == current_is_wow;
+}
+
+/**
+ * Inject a thread into the given process that runs ExitProcess().
+ *
+ * Note: as kernel32.dll is loaded before any process, the other process and
+ * this process will have ExitProcess() at the same address.
+ *
+ * This function expects the process handle to have the access rights for
+ * CreateRemoteThread(): PROCESS_CREATE_THREAD, PROCESS_QUERY_INFORMATION,
+ * PROCESS_VM_OPERATION, PROCESS_VM_WRITE, and PROCESS_VM_READ.
+ *
+ * The idea comes from the Dr Dobb's article "A Safer Alternative to
+ * TerminateProcess()" by Andrew Tucker (July 1, 1999),
+ * http://www.drdobbs.com/a-safer-alternative-to-terminateprocess/184416547
+ *
+ * If this method fails, we fall back to running terminate_process_tree().
+ */
+static int exit_process(HANDLE process, int exit_code)
+{
+ DWORD code;
+
+ if (GetExitCodeProcess(process, &code) && code == STILL_ACTIVE) {
+ static int initialized;
+ static LPTHREAD_START_ROUTINE exit_process_address;
+ PVOID arg = (PVOID)(intptr_t)exit_code;
+ DWORD thread_id;
+ HANDLE thread = NULL;
+
+ if (!initialized) {
+ HINSTANCE kernel32 = GetModuleHandleA("kernel32");
+ if (!kernel32)
+ die("BUG: cannot find kernel32");
+ exit_process_address =
+ (LPTHREAD_START_ROUTINE)(void (*)(void))
+ GetProcAddress(kernel32, "ExitProcess");
+ initialized = 1;
+ }
+ if (!exit_process_address ||
+ !process_architecture_matches_current(process))
+ return terminate_process_tree(process, exit_code);
+
+ thread = CreateRemoteThread(process, NULL, 0,
+ exit_process_address,
+ arg, 0, &thread_id);
+ if (thread) {
+ CloseHandle(thread);
+ /*
+ * If the process survives for 10 seconds (a completely
+ * arbitrary value picked from thin air), fall back to
+ * killing the process tree via TerminateProcess().
+ */
+ if (WaitForSingleObject(process, 10000) ==
+ WAIT_OBJECT_0) {
+ CloseHandle(process);
+ return 0;
+ }
+ }
+
+ return terminate_process_tree(process, exit_code);
+ }
+
+ return 0;
+}
+
+#endif
diff --git a/compat/win32/fscache.c b/compat/win32/fscache.c
new file mode 100644
index 00000000000000..cbd90ececf6b37
--- /dev/null
+++ b/compat/win32/fscache.c
@@ -0,0 +1,820 @@
+#include "../../git-compat-util.h"
+#include "../../hashmap.h"
+#include "../win32.h"
+#include "fscache.h"
+#include "../../dir.h"
+#include "../../abspath.h"
+#include "../../trace.h"
+#include "config.h"
+#include "../../mem-pool.h"
+#include "ntifs.h"
+#include "wsl.h"
+
+static volatile long initialized;
+static DWORD dwTlsIndex;
+CRITICAL_SECTION fscache_cs;
+
+/*
+ * Store one fscache per thread to avoid thread contention and locking.
+ * This is ok because multi-threaded access is 1) uncommon and 2) always
+ * splitting up the cache entries across multiple threads so there isn't
+ * any overlap between threads anyway.
+ */
+struct fscache {
+ volatile long enabled;
+ struct hashmap map;
+ struct mem_pool mem_pool;
+ unsigned int lstat_requests;
+ unsigned int opendir_requests;
+ unsigned int fscache_requests;
+ unsigned int fscache_misses;
+ /*
+ * 32k wide characters translates to 64kB, which is the maximum that
+ * Windows 8.1 and earlier can handle. On network drives, not only
+ * the client's Windows version matters, but also the server's,
+ * therefore we need to keep this to 64kB.
+ */
+ WCHAR buffer[32 * 1024];
+};
+static struct trace_key trace_fscache = TRACE_KEY_INIT(FSCACHE);
+
+/*
+ * An entry in the file system cache. Used for both entire directory listings
+ * and file entries.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+struct fsentry {
+ struct hashmap_entry ent;
+ mode_t st_mode;
+ ULONG reparse_tag;
+ /* Pointer to the directory listing, or NULL for the listing itself. */
+ struct fsentry *list;
+ /* Pointer to the next file entry of the list. */
+ struct fsentry *next;
+
+ union {
+ /* Reference count of the directory listing. */
+ volatile long refcnt;
+ struct {
+ /* More stat members (only used for file entries). */
+ off64_t st_size;
+ struct timespec st_atim;
+ struct timespec st_mtim;
+ struct timespec st_ctim;
+ } s;
+ } u;
+
+ /* Length of name. */
+ unsigned short len;
+ /*
+ * Name of the entry. For directory listings: relative path of the
+ * directory, without trailing '/' (empty for cwd()). For file entries:
+ * name of the file. Typically points to the end of the structure if
+ * the fsentry is allocated on the heap (see fsentry_alloc), or to a
+ * local variable if on the stack (see fsentry_init).
+ */
+ struct dirent dirent;
+};
+#pragma GCC diagnostic pop
+
+#pragma GCC diagnostic push
+#ifdef __clang__
+#pragma GCC diagnostic ignored "-Wflexible-array-extensions"
+#endif
+struct heap_fsentry {
+ union {
+ struct fsentry ent;
+ char dummy[sizeof(struct fsentry) + MAX_LONG_PATH];
+ } u;
+};
+#pragma GCC diagnostic pop
+
+/*
+ * Compares the paths of two fsentry structures for equality.
+ */
+static int fsentry_cmp(void *cmp_data UNUSED,
+ const struct fsentry *fse1, const struct fsentry *fse2,
+ void *keydata UNUSED)
+{
+ int res;
+ if (fse1 == fse2)
+ return 0;
+
+ /* compare the list parts first */
+ if (fse1->list != fse2->list &&
+ (res = fsentry_cmp(NULL, fse1->list ? fse1->list : fse1,
+ fse2->list ? fse2->list : fse2, NULL)))
+ return res;
+
+ /* if list parts are equal, compare len and name */
+ if (fse1->len != fse2->len)
+ return fse1->len - fse2->len;
+ return fspathncmp(fse1->dirent.d_name, fse2->dirent.d_name, fse1->len);
+}
+
+/*
+ * Calculates the hash code of an fsentry structure's path.
+ */
+static unsigned int fsentry_hash(const struct fsentry *fse)
+{
+ unsigned int hash = fse->list ? fse->list->ent.hash : 0;
+ return hash ^ memihash(fse->dirent.d_name, fse->len);
+}
+
+/*
+ * Initialize an fsentry structure for use by fsentry_hash and fsentry_cmp.
+ */
+static void fsentry_init(struct fsentry *fse, struct fsentry *list,
+ const char *name, size_t len)
+{
+ fse->list = list;
+ if (len > MAX_LONG_PATH)
+ BUG("Trying to allocate fsentry for long path '%.*s'",
+ (int)len, name);
+ memcpy(fse->dirent.d_name, name, len);
+ fse->dirent.d_name[len] = 0;
+ fse->len = len;
+ hashmap_entry_init(&fse->ent, fsentry_hash(fse));
+}
+
+/*
+ * Allocate an fsentry structure on the heap.
+ */
+static struct fsentry *fsentry_alloc(struct fscache *cache, struct fsentry *list, const char *name,
+ size_t len)
+{
+ /* overallocate fsentry and copy the name to the end */
+ struct fsentry *fse =
+ mem_pool_alloc(&cache->mem_pool, sizeof(*fse) + len + 1);
+ /* init the rest of the structure */
+ fsentry_init(fse, list, name, len);
+ fse->next = NULL;
+ fse->u.refcnt = 1;
+ return fse;
+}
+
+/*
+ * Add a reference to an fsentry.
+ */
+inline static void fsentry_addref(struct fsentry *fse)
+{
+ if (fse->list)
+ fse = fse->list;
+
+ InterlockedIncrement(&(fse->u.refcnt));
+}
+
+/*
+ * Release the reference to an fsentry.
+ */
+static void fsentry_release(struct fsentry *fse)
+{
+ if (fse->list)
+ fse = fse->list;
+
+ InterlockedDecrement(&(fse->u.refcnt));
+}
+
+static int xwcstoutfn(char *utf, int utflen, const wchar_t *wcs, int wcslen)
+{
+ if (!wcs || !utf || utflen < 1) {
+ errno = EINVAL;
+ return -1;
+ }
+ utflen = WideCharToMultiByte(CP_UTF8, 0, wcs, wcslen, utf, utflen, NULL, NULL);
+ if (utflen)
+ return utflen;
+ errno = ERANGE;
+ return -1;
+}
+
+/*
+ * Allocate and initialize an fsentry from a FILE_FULL_DIR_INFORMATION structure.
+ */
+static struct fsentry *fseentry_create_entry(struct fscache *cache,
+ struct fsentry *list,
+ PFILE_FULL_DIR_INFORMATION fdata)
+{
+ char buf[MAX_PATH * 3];
+ int len;
+ struct fsentry *fse;
+
+ len = xwcstoutfn(buf, ARRAY_SIZE(buf), fdata->FileName, fdata->FileNameLength / sizeof(wchar_t));
+
+ fse = fsentry_alloc(cache, list, buf, len);
+
+ fse->reparse_tag =
+ fdata->FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT ?
+ fdata->EaSize : 0;
+
+ /*
+ * On certain Windows versions, host directories mapped into
+ * Windows Containers ("Volumes", see https://docs.docker.com/storage/volumes/)
+ * look like symbolic links, but their targets are paths that
+ * are valid only in kernel mode.
+ *
+ * Let's work around this by detecting that situation and
+ * telling Git that these are *not* symbolic links.
+ */
+ if (fse->reparse_tag == IO_REPARSE_TAG_SYMLINK &&
+ sizeof(buf) > (size_t)(list ? list->len + 1 : 0) + fse->len + 1 &&
+ is_inside_windows_container()) {
+ size_t off = 0;
+ if (list) {
+ memcpy(buf, list->dirent.d_name, list->len);
+ buf[list->len] = '/';
+ off = list->len + 1;
+ }
+ memcpy(buf + off, fse->dirent.d_name, fse->len);
+ buf[off + fse->len] = '\0';
+ }
+
+ fse->st_mode = file_attr_to_st_mode(fdata->FileAttributes,
+ fdata->EaSize, buf);
+ fse->dirent.d_type = S_ISREG(fse->st_mode) ? DT_REG :
+ S_ISDIR(fse->st_mode) ? DT_DIR : DT_LNK;
+ fse->u.s.st_size = S_ISLNK(fse->st_mode) ? MAX_LONG_PATH :
+ fdata->EndOfFile.LowPart |
+ (((off_t)fdata->EndOfFile.HighPart) << 32);
+ filetime_to_timespec((FILETIME *)&(fdata->LastAccessTime),
+ &(fse->u.s.st_atim));
+ filetime_to_timespec((FILETIME *)&(fdata->LastWriteTime),
+ &(fse->u.s.st_mtim));
+ filetime_to_timespec((FILETIME *)&(fdata->CreationTime),
+ &(fse->u.s.st_ctim));
+ if (fdata->EaSize > 0 &&
+ sizeof(buf) >= (size_t)(list ? list->len+1 : 0) + fse->len+1 &&
+ are_wsl_compatible_mode_bits_enabled()) {
+ size_t off = 0;
+ wchar_t wpath[MAX_LONG_PATH];
+ if (list && list->len) {
+ memcpy(buf, list->dirent.d_name, list->len);
+ buf[list->len] = '/';
+ off = list->len + 1;
+ }
+ memcpy(buf + off, fse->dirent.d_name, fse->len);
+ buf[off + fse->len] = '\0';
+ if (xutftowcs_long_path(wpath, buf) >= 0)
+ copy_wsl_mode_bits_from_disk(wpath, -1, &fse->st_mode);
+ }
+
+ return fse;
+}
+
+/*
+ * Create an fsentry-based directory listing (similar to opendir / readdir).
+ * Dir should not contain trailing '/'. Use an empty string for the current
+ * directory (not "."!).
+ */
+static struct fsentry *fsentry_create_list(struct fscache *cache, const struct fsentry *dir,
+ int *dir_not_found)
+{
+ wchar_t pattern[MAX_LONG_PATH];
+ NTSTATUS status;
+ IO_STATUS_BLOCK iosb;
+ PFILE_FULL_DIR_INFORMATION di;
+ HANDLE h;
+ int wlen;
+ struct fsentry *list, **phead;
+ DWORD err;
+
+ *dir_not_found = 0;
+
+ /* convert name to UTF-16 and check length */
+ if ((wlen = xutftowcs_path_ex(pattern, dir->dirent.d_name,
+ MAX_LONG_PATH, dir->len, MAX_PATH - 2,
+ are_long_paths_enabled())) < 0)
+ return NULL;
+
+ /* handle CWD */
+ if (!wlen) {
+ wlen = GetCurrentDirectoryW(ARRAY_SIZE(pattern), pattern);
+ if (!wlen || wlen >= (ssize_t)ARRAY_SIZE(pattern)) {
+ errno = wlen ? ENAMETOOLONG : err_win_to_posix(GetLastError());
+ return NULL;
+ }
+ }
+
+ h = CreateFileW(pattern, FILE_LIST_DIRECTORY,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+ if (h == INVALID_HANDLE_VALUE) {
+ err = GetLastError();
+ *dir_not_found = 1; /* or empty directory */
+ errno = (err == ERROR_DIRECTORY) ? ENOTDIR : err_win_to_posix(err);
+ trace_printf_key(&trace_fscache, "fscache: error(%d) '%s'\n",
+ errno, dir->dirent.d_name);
+ return NULL;
+ }
+
+ /* allocate object to hold directory listing */
+ list = fsentry_alloc(cache, NULL, dir->dirent.d_name, dir->len);
+ list->st_mode = S_IFDIR;
+ list->dirent.d_type = DT_DIR;
+
+ /* walk directory and build linked list of fsentry structures */
+ phead = &list->next;
+ status = NtQueryDirectoryFile(h, NULL, 0, 0, &iosb, cache->buffer,
+ sizeof(cache->buffer), FileFullDirectoryInformation, FALSE, NULL, FALSE);
+ if (!NT_SUCCESS(status)) {
+ /*
+ * NtQueryDirectoryFile returns STATUS_INVALID_PARAMETER when
+ * asked to enumerate an invalid directory (ie it is a file
+ * instead of a directory). Verify that is the actual cause
+ * of the error.
+ */
+ if (status == (NTSTATUS)STATUS_INVALID_PARAMETER) {
+ DWORD attributes = GetFileAttributesW(pattern);
+ if (!(attributes & FILE_ATTRIBUTE_DIRECTORY))
+ status = ERROR_DIRECTORY;
+ }
+ goto Error;
+ }
+ di = (PFILE_FULL_DIR_INFORMATION)(cache->buffer);
+ for (;;) {
+
+ *phead = fseentry_create_entry(cache, list, di);
+ phead = &(*phead)->next;
+
+ /* If there is no offset in the entry, the buffer has been exhausted. */
+ if (di->NextEntryOffset == 0) {
+ status = NtQueryDirectoryFile(h, NULL, 0, 0, &iosb, cache->buffer,
+ sizeof(cache->buffer), FileFullDirectoryInformation, FALSE, NULL, FALSE);
+ if (!NT_SUCCESS(status)) {
+ if (status == STATUS_NO_MORE_FILES)
+ break;
+ goto Error;
+ }
+
+ di = (PFILE_FULL_DIR_INFORMATION)(cache->buffer);
+ continue;
+ }
+
+ /* Advance to the next entry. */
+ di = (PFILE_FULL_DIR_INFORMATION)(((PUCHAR)di) + di->NextEntryOffset);
+ }
+
+ CloseHandle(h);
+ return list;
+
+Error:
+ trace_printf_key(&trace_fscache,
+ "fscache: status(%ld) unable to query directory "
+ "contents '%s'\n", status, dir->dirent.d_name);
+ CloseHandle(h);
+ fsentry_release(list);
+ return NULL;
+}
+
+/*
+ * Adds a directory listing to the cache.
+ */
+static void fscache_add(struct fscache *cache, struct fsentry *fse)
+{
+ if (fse->list)
+ fse = fse->list;
+
+ for (; fse; fse = fse->next)
+ hashmap_add(&cache->map, &fse->ent);
+}
+
+/*
+ * Clears the cache.
+ */
+static void fscache_clear(struct fscache *cache)
+{
+ mem_pool_discard(&cache->mem_pool, 0);
+ mem_pool_init(&cache->mem_pool, 0);
+ hashmap_clear(&cache->map);
+ hashmap_init(&cache->map, (hashmap_cmp_fn)fsentry_cmp, NULL, 0);
+ cache->lstat_requests = cache->opendir_requests = 0;
+ cache->fscache_misses = cache->fscache_requests = 0;
+}
+
+/*
+ * Checks if the cache is enabled for the given path.
+ */
+static int do_fscache_enabled(struct fscache *cache, const char *path)
+{
+ return cache->enabled > 0 && !is_absolute_path(path);
+}
+
+int fscache_enabled(const char *path)
+{
+ struct fscache *cache = fscache_getcache();
+
+ return cache ? do_fscache_enabled(cache, path) : 0;
+}
+
+/*
+ * Looks up or creates a cache entry for the specified key.
+ */
+static struct fsentry *fscache_get(struct fscache *cache, struct fsentry *key)
+{
+ struct fsentry *fse;
+ int dir_not_found;
+
+ cache->fscache_requests++;
+ /* check if entry is in cache */
+ fse = hashmap_get_entry(&cache->map, key, ent, NULL);
+ if (fse) {
+ if (fse->st_mode)
+ fsentry_addref(fse);
+ else
+ fse = NULL; /* non-existing directory */
+ return fse;
+ }
+ /* if looking for a file, check if directory listing is in cache */
+ if (!fse && key->list) {
+ fse = hashmap_get_entry(&cache->map, key->list, ent, NULL);
+ if (fse) {
+ /*
+ * dir entry without file entry, or dir does not
+ * exist -> file doesn't exist
+ */
+ errno = ENOENT;
+ return NULL;
+ }
+ }
+
+ /* create the directory listing */
+ fse = fsentry_create_list(cache, key->list ? key->list : key, &dir_not_found);
+
+ /* leave on error (errno set by fsentry_create_list) */
+ if (!fse) {
+ if (dir_not_found && key->list) {
+ /*
+ * Record that the directory does not exist (or is
+ * empty, which for all practical matters is the same
+ * thing as far as fscache is concerned).
+ */
+ fse = fsentry_alloc(cache, key->list->list,
+ key->list->dirent.d_name,
+ key->list->len);
+ fse->st_mode = 0;
+ hashmap_add(&cache->map, &fse->ent);
+ }
+ return NULL;
+ }
+
+ /* add directory listing to the cache */
+ cache->fscache_misses++;
+ fscache_add(cache, fse);
+
+ /* lookup file entry if requested (fse already points to directory) */
+ if (key->list)
+ fse = hashmap_get_entry(&cache->map, key, ent, NULL);
+
+ if (fse && !fse->st_mode)
+ fse = NULL; /* non-existing directory */
+
+ /* return entry or ENOENT */
+ if (fse)
+ fsentry_addref(fse);
+ else
+ errno = ENOENT;
+
+ return fse;
+}
+
+/*
+ * Enables the cache. Note that the cache is read-only, changes to
+ * the working directory are NOT reflected in the cache while enabled.
+ */
+int fscache_enable(size_t initial_size)
+{
+ int fscache;
+ struct fscache *cache;
+ int result = 0;
+
+ /* allow the cache to be disabled entirely */
+ fscache = git_env_bool("GIT_TEST_FSCACHE", -1);
+ if (fscache != -1)
+ core_fscache = fscache;
+ if (!core_fscache)
+ return 0;
+
+ /*
+ * refcount the global fscache initialization so that the
+ * opendir and lstat function pointers are redirected if
+ * any threads are using the fscache.
+ */
+ EnterCriticalSection(&fscache_cs);
+ if (!initialized) {
+ if (!dwTlsIndex) {
+ dwTlsIndex = TlsAlloc();
+ if (dwTlsIndex == TLS_OUT_OF_INDEXES) {
+ LeaveCriticalSection(&fscache_cs);
+ return 0;
+ }
+ }
+
+ /* redirect opendir and lstat to the fscache implementations */
+ opendir = fscache_opendir;
+ lstat = fscache_lstat;
+ win32_is_mount_point = fscache_is_mount_point;
+ }
+ initialized++;
+ LeaveCriticalSection(&fscache_cs);
+
+ /* refcount the thread specific initialization */
+ cache = fscache_getcache();
+ if (cache) {
+ cache->enabled++;
+ } else {
+ cache = (struct fscache *)xcalloc(1, sizeof(*cache));
+ cache->enabled = 1;
+ /*
+ * avoid having to rehash by leaving room for the parent dirs.
+ * '4' was determined empirically by testing several repos
+ */
+ hashmap_init(&cache->map, (hashmap_cmp_fn)fsentry_cmp, NULL, initial_size * 4);
+ mem_pool_init(&cache->mem_pool, 0);
+ if (!TlsSetValue(dwTlsIndex, cache))
+ BUG("TlsSetValue error");
+ }
+
+ trace_printf_key(&trace_fscache, "fscache: enable\n");
+ return result;
+}
+
+/*
+ * Disables the cache.
+ */
+void fscache_disable(void)
+{
+ struct fscache *cache;
+
+ if (!core_fscache)
+ return;
+
+ /* update the thread specific fscache initialization */
+ cache = fscache_getcache();
+ if (!cache)
+ BUG("fscache_disable() called on a thread where fscache has not been initialized");
+ if (!cache->enabled)
+ BUG("fscache_disable() called on an fscache that is already disabled");
+ cache->enabled--;
+ if (!cache->enabled) {
+ TlsSetValue(dwTlsIndex, NULL);
+ trace_printf_key(&trace_fscache, "fscache_disable: lstat %u, opendir %u, "
+ "total requests/misses %u/%u\n",
+ cache->lstat_requests, cache->opendir_requests,
+ cache->fscache_requests, cache->fscache_misses);
+ mem_pool_discard(&cache->mem_pool, 0);
+ hashmap_clear(&cache->map);
+ free(cache);
+ }
+
+ /* update the global fscache initialization */
+ EnterCriticalSection(&fscache_cs);
+ initialized--;
+ if (!initialized) {
+ /* reset opendir and lstat to the original implementations */
+ opendir = dirent_opendir;
+ lstat = mingw_lstat;
+ win32_is_mount_point = mingw_is_mount_point;
+ }
+ LeaveCriticalSection(&fscache_cs);
+
+ trace_printf_key(&trace_fscache, "fscache: disable\n");
+ return;
+}
+
+/*
+ * Flush cached stats result when fscache is enabled.
+ */
+void fscache_flush(void)
+{
+ struct fscache *cache = fscache_getcache();
+
+ if (cache && cache->enabled) {
+ fscache_clear(cache);
+ }
+}
+
+/*
+ * Lstat replacement, uses the cache if enabled, otherwise redirects to
+ * mingw_lstat.
+ */
+int fscache_lstat(const char *filename, struct stat *st)
+{
+ int dirlen, base, len;
+#pragma GCC diagnostic push
+#ifdef __clang__
+#pragma GCC diagnostic ignored "-Wflexible-array-extensions"
+#endif
+ struct heap_fsentry key[2];
+#pragma GCC diagnostic pop
+ struct fsentry *fse;
+ struct fscache *cache = fscache_getcache();
+
+ if (!cache || !do_fscache_enabled(cache, filename))
+ return mingw_lstat(filename, st);
+
+ cache->lstat_requests++;
+ /* split filename into path + name */
+ len = strlen(filename);
+ if (len && is_dir_sep(filename[len - 1]))
+ len--;
+ base = len;
+ while (base && !is_dir_sep(filename[base - 1]))
+ base--;
+ dirlen = base ? base - 1 : 0;
+
+ /* lookup entry for path + name in cache */
+ fsentry_init(&key[0].u.ent, NULL, filename, dirlen);
+ fsentry_init(&key[1].u.ent, &key[0].u.ent, filename + base, len - base);
+ fse = fscache_get(cache, &key[1].u.ent);
+ if (!fse) {
+ errno = ENOENT;
+ return -1;
+ }
+
+ /*
+ * Special case symbolic links: FindFirstFile()/FindNextFile() did not
+ * provide us with the length of the target path.
+ */
+ if (fse->u.s.st_size == MAX_LONG_PATH && S_ISLNK(fse->st_mode)) {
+ char buf[MAX_LONG_PATH];
+ int len = readlink(filename, buf, sizeof(buf) - 1);
+
+ if (len > 0)
+ fse->u.s.st_size = len;
+ }
+
+ /* copy stat data */
+ st->st_ino = 0;
+ st->st_gid = 0;
+ st->st_uid = 0;
+ st->st_dev = 0;
+ st->st_rdev = 0;
+ st->st_nlink = 1;
+ st->st_mode = fse->st_mode;
+ st->st_size = fse->u.s.st_size;
+ st->st_atim = fse->u.s.st_atim;
+ st->st_mtim = fse->u.s.st_mtim;
+ st->st_ctim = fse->u.s.st_ctim;
+
+ /* don't forget to release fsentry */
+ fsentry_release(fse);
+ return 0;
+}
+
+/*
+ * is_mount_point() replacement, uses cache if enabled, otherwise falls
+ * back to mingw_is_mount_point().
+ */
+int fscache_is_mount_point(struct strbuf *path)
+{
+ int dirlen, base, len;
+#pragma GCC diagnostic push
+#ifdef __clang__
+#pragma GCC diagnostic ignored "-Wflexible-array-extensions"
+#endif
+ struct heap_fsentry key[2];
+#pragma GCC diagnostic pop
+ struct fsentry *fse;
+ struct fscache *cache = fscache_getcache();
+
+ if (!cache || !do_fscache_enabled(cache, path->buf))
+ return mingw_is_mount_point(path);
+
+ cache->lstat_requests++;
+ /* split path into path + name */
+ len = path->len;
+ if (len && is_dir_sep(path->buf[len - 1]))
+ len--;
+ base = len;
+ while (base && !is_dir_sep(path->buf[base - 1]))
+ base--;
+ dirlen = base ? base - 1 : 0;
+
+ /* lookup entry for path + name in cache */
+ fsentry_init(&key[0].u.ent, NULL, path->buf, dirlen);
+ fsentry_init(&key[1].u.ent, &key[0].u.ent, path->buf + base, len - base);
+ fse = fscache_get(cache, &key[1].u.ent);
+ if (!fse)
+ return mingw_is_mount_point(path);
+ return fse->reparse_tag == IO_REPARSE_TAG_MOUNT_POINT;
+}
+
+typedef struct fscache_DIR {
+ struct DIR base_dir; /* extend base struct DIR */
+ struct fsentry *pfsentry;
+ struct dirent *dirent;
+} fscache_DIR;
+
+/*
+ * Readdir replacement.
+ */
+static struct dirent *fscache_readdir(DIR *base_dir)
+{
+ fscache_DIR *dir = (fscache_DIR*) base_dir;
+ struct fsentry *next = dir->pfsentry->next;
+ if (!next)
+ return NULL;
+ dir->pfsentry = next;
+ dir->dirent = &next->dirent;
+ return dir->dirent;
+}
+
+/*
+ * Closedir replacement.
+ */
+static int fscache_closedir(DIR *base_dir)
+{
+ fscache_DIR *dir = (fscache_DIR*) base_dir;
+ fsentry_release(dir->pfsentry);
+ free(dir);
+ return 0;
+}
+
+/*
+ * Opendir replacement, uses a directory listing from the cache if enabled,
+ * otherwise calls original dirent implementation.
+ */
+DIR *fscache_opendir(const char *dirname)
+{
+ struct heap_fsentry key;
+ struct fsentry *list;
+ fscache_DIR *dir;
+ int len;
+ struct fscache *cache = fscache_getcache();
+
+ if (!cache || !do_fscache_enabled(cache, dirname))
+ return dirent_opendir(dirname);
+
+ cache->opendir_requests++;
+ /* prepare name (strip trailing '/', replace '.') */
+ len = strlen(dirname);
+ if ((len == 1 && dirname[0] == '.') ||
+ (len && is_dir_sep(dirname[len - 1])))
+ len--;
+
+ /* get directory listing from cache */
+ fsentry_init(&key.u.ent, NULL, dirname, len);
+ list = fscache_get(cache, &key.u.ent);
+ if (!list)
+ return NULL;
+
+ /* alloc and return DIR structure */
+ dir = (fscache_DIR*) xmalloc(sizeof(fscache_DIR));
+ dir->base_dir.preaddir = fscache_readdir;
+ dir->base_dir.pclosedir = fscache_closedir;
+ dir->pfsentry = list;
+ return (DIR*) dir;
+}
+
+struct fscache *fscache_getcache(void)
+{
+ return (struct fscache *)TlsGetValue(dwTlsIndex);
+}
+
+void fscache_merge(struct fscache *dest)
+{
+ struct hashmap_iter iter;
+ struct hashmap_entry *e;
+ struct fscache *cache = fscache_getcache();
+
+ /*
+ * Only do the merge if fscache was enabled and we have a dest
+ * cache to merge into.
+ */
+ if (!dest) {
+ fscache_enable(0);
+ return;
+ }
+ if (!cache)
+ BUG("fscache_merge() called on a thread where fscache has not been initialized");
+
+ TlsSetValue(dwTlsIndex, NULL);
+ trace_printf_key(&trace_fscache, "fscache_merge: lstat %u, opendir %u, "
+ "total requests/misses %u/%u\n",
+ cache->lstat_requests, cache->opendir_requests,
+ cache->fscache_requests, cache->fscache_misses);
+
+ /*
+ * This is only safe because the primary thread we're merging into
+ * isn't being used so the critical section only needs to prevent
+ * the child threads from stomping on each other.
+ */
+ EnterCriticalSection(&fscache_cs);
+
+ hashmap_iter_init(&cache->map, &iter);
+ while ((e = hashmap_iter_next(&iter)))
+ hashmap_add(&dest->map, e);
+
+ mem_pool_combine(&dest->mem_pool, &cache->mem_pool);
+
+ dest->lstat_requests += cache->lstat_requests;
+ dest->opendir_requests += cache->opendir_requests;
+ dest->fscache_requests += cache->fscache_requests;
+ dest->fscache_misses += cache->fscache_misses;
+ initialized--;
+ LeaveCriticalSection(&fscache_cs);
+
+ free(cache);
+
+}
diff --git a/compat/win32/fscache.h b/compat/win32/fscache.h
new file mode 100644
index 00000000000000..386c770a85d321
--- /dev/null
+++ b/compat/win32/fscache.h
@@ -0,0 +1,36 @@
+#ifndef FSCACHE_H
+#define FSCACHE_H
+
+/*
+ * The fscache is thread specific. enable_fscache() must be called
+ * for each thread where caching is desired.
+ */
+
+extern CRITICAL_SECTION fscache_cs;
+
+int fscache_enable(size_t initial_size);
+#define enable_fscache(initial_size) fscache_enable(initial_size)
+
+void fscache_disable(void);
+#define disable_fscache() fscache_disable()
+
+int fscache_enabled(const char *path);
+#define is_fscache_enabled(path) fscache_enabled(path)
+
+void fscache_flush(void);
+#define flush_fscache() fscache_flush()
+
+DIR *fscache_opendir(const char *dir);
+int fscache_lstat(const char *file_name, struct stat *buf);
+int fscache_is_mount_point(struct strbuf *path);
+
+/* opaque fscache structure */
+struct fscache;
+
+struct fscache *fscache_getcache(void);
+#define getcache_fscache() fscache_getcache()
+
+void fscache_merge(struct fscache *dest);
+#define merge_fscache(dest) fscache_merge(dest)
+
+#endif
diff --git a/compat/win32/ntifs.h b/compat/win32/ntifs.h
new file mode 100644
index 00000000000000..64ed792c52f352
--- /dev/null
+++ b/compat/win32/ntifs.h
@@ -0,0 +1,131 @@
+#ifndef _NTIFS_
+#define _NTIFS_
+
+/*
+ * Copy necessary structures and definitions out of the Windows DDK
+ * to enable calling NtQueryDirectoryFile()
+ */
+
+typedef _Return_type_success_(return >= 0) LONG NTSTATUS;
+#define NT_SUCCESS(Status) (((NTSTATUS)(Status)) >= 0)
+
+#if !defined(_NTSECAPI_) && !defined(_WINTERNL_) && \
+ !defined(__UNICODE_STRING_DEFINED)
+#define __UNICODE_STRING_DEFINED
+typedef struct _UNICODE_STRING {
+ USHORT Length;
+ USHORT MaximumLength;
+ PWSTR Buffer;
+} UNICODE_STRING;
+typedef UNICODE_STRING *PUNICODE_STRING;
+typedef const UNICODE_STRING *PCUNICODE_STRING;
+#endif /* !_NTSECAPI_ && !_WINTERNL_ && !__UNICODE_STRING_DEFINED */
+
+typedef enum _FILE_INFORMATION_CLASS {
+ FileDirectoryInformation = 1,
+ FileFullDirectoryInformation,
+ FileBothDirectoryInformation,
+ FileBasicInformation,
+ FileStandardInformation,
+ FileInternalInformation,
+ FileEaInformation,
+ FileAccessInformation,
+ FileNameInformation,
+ FileRenameInformation,
+ FileLinkInformation,
+ FileNamesInformation,
+ FileDispositionInformation,
+ FilePositionInformation,
+ FileFullEaInformation,
+ FileModeInformation,
+ FileAlignmentInformation,
+ FileAllInformation,
+ FileAllocationInformation,
+ FileEndOfFileInformation,
+ FileAlternateNameInformation,
+ FileStreamInformation,
+ FilePipeInformation,
+ FilePipeLocalInformation,
+ FilePipeRemoteInformation,
+ FileMailslotQueryInformation,
+ FileMailslotSetInformation,
+ FileCompressionInformation,
+ FileObjectIdInformation,
+ FileCompletionInformation,
+ FileMoveClusterInformation,
+ FileQuotaInformation,
+ FileReparsePointInformation,
+ FileNetworkOpenInformation,
+ FileAttributeTagInformation,
+ FileTrackingInformation,
+ FileIdBothDirectoryInformation,
+ FileIdFullDirectoryInformation,
+ FileValidDataLengthInformation,
+ FileShortNameInformation,
+ FileIoCompletionNotificationInformation,
+ FileIoStatusBlockRangeInformation,
+ FileIoPriorityHintInformation,
+ FileSfioReserveInformation,
+ FileSfioVolumeInformation,
+ FileHardLinkInformation,
+ FileProcessIdsUsingFileInformation,
+ FileNormalizedNameInformation,
+ FileNetworkPhysicalNameInformation,
+ FileIdGlobalTxDirectoryInformation,
+ FileIsRemoteDeviceInformation,
+ FileAttributeCacheInformation,
+ FileNumaNodeInformation,
+ FileStandardLinkInformation,
+ FileRemoteProtocolInformation,
+ FileMaximumInformation
+} FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS;
+
+typedef struct _FILE_FULL_DIR_INFORMATION {
+ ULONG NextEntryOffset;
+ ULONG FileIndex;
+ LARGE_INTEGER CreationTime;
+ LARGE_INTEGER LastAccessTime;
+ LARGE_INTEGER LastWriteTime;
+ LARGE_INTEGER ChangeTime;
+ LARGE_INTEGER EndOfFile;
+ LARGE_INTEGER AllocationSize;
+ ULONG FileAttributes;
+ ULONG FileNameLength;
+ ULONG EaSize;
+ WCHAR FileName[1];
+} FILE_FULL_DIR_INFORMATION, *PFILE_FULL_DIR_INFORMATION;
+
+typedef struct _IO_STATUS_BLOCK {
+ union {
+ NTSTATUS Status;
+ PVOID Pointer;
+ } u;
+ ULONG_PTR Information;
+} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;
+
+typedef VOID
+(NTAPI *PIO_APC_ROUTINE)(
+ IN PVOID ApcContext,
+ IN PIO_STATUS_BLOCK IoStatusBlock,
+ IN ULONG Reserved);
+
+NTSYSCALLAPI
+NTSTATUS
+NTAPI
+NtQueryDirectoryFile(
+ _In_ HANDLE FileHandle,
+ _In_opt_ HANDLE Event,
+ _In_opt_ PIO_APC_ROUTINE ApcRoutine,
+ _In_opt_ PVOID ApcContext,
+ _Out_ PIO_STATUS_BLOCK IoStatusBlock,
+ _Out_writes_bytes_(Length) PVOID FileInformation,
+ _In_ ULONG Length,
+ _In_ FILE_INFORMATION_CLASS FileInformationClass,
+ _In_ BOOLEAN ReturnSingleEntry,
+ _In_opt_ PUNICODE_STRING FileName,
+ _In_ BOOLEAN RestartScan
+);
+
+#define STATUS_NO_MORE_FILES ((NTSTATUS)0x80000006L)
+
+#endif
diff --git a/compat/win32/path-utils.c b/compat/win32/path-utils.c
index 966ef779b9ca9b..c4fea0301b5ecc 100644
--- a/compat/win32/path-utils.c
+++ b/compat/win32/path-utils.c
@@ -2,6 +2,9 @@
#include "../../git-compat-util.h"
#include "../../environment.h"
+#include "../../wrapper.h"
+#include "../../strbuf.h"
+#include "../../versioncmp.h"
int win32_has_dos_drive_prefix(const char *path)
{
@@ -89,3 +92,199 @@ int win32_fspathcmp(const char *a, const char *b)
{
return win32_fspathncmp(a, b, (size_t)-1);
}
+
+static int read_at(int fd, char *buffer, size_t offset, size_t size)
+{
+ if (lseek(fd, offset, SEEK_SET) < 0) {
+ fprintf(stderr, "could not seek to 0x%x\n", (unsigned int)offset);
+ return -1;
+ }
+
+ return read_in_full(fd, buffer, size);
+}
+
+static size_t le16(const char *buffer)
+{
+ unsigned char *u = (unsigned char *)buffer;
+ return u[0] | (u[1] << 8);
+}
+
+static size_t le32(const char *buffer)
+{
+ return le16(buffer) | (le16(buffer + 2) << 16);
+}
+
+/*
+ * Determine the Go version of a given executable, if it was built with Go.
+ *
+ * This recapitulates the logic from
+ * https://github.com/golang/go/blob/master/src/cmd/go/internal/version/version.go
+ * (without requiring the user to install `go.exe` to find out).
+ */
+static ssize_t get_go_version(const char *path, char *go_version, size_t go_version_size)
+{
+ int fd = open(path, O_RDONLY);
+ char buffer[1024];
+ off_t offset;
+ size_t num_sections, opt_header_size, i;
+ char *p = NULL, *q;
+ ssize_t res = -1;
+
+ if (fd < 0)
+ return -1;
+
+ if (read_in_full(fd, buffer, 2) < 0)
+ goto fail;
+
+ /*
+ * Parse the PE file format, for more details, see
+ * https://en.wikipedia.org/wiki/Portable_Executable#Layout and
+ * https://learn.microsoft.com/en-us/windows/win32/debug/pe-format
+ */
+ if (buffer[0] != 'M' || buffer[1] != 'Z')
+ goto fail;
+
+ if (read_at(fd, buffer, 0x3c, 4) < 0)
+ goto fail;
+
+ /* Read the `PE\0\0` signature and the COFF file header */
+ offset = le32(buffer);
+ if (read_at(fd, buffer, offset, 24) < 0)
+ goto fail;
+
+ if (buffer[0] != 'P' || buffer[1] != 'E' || buffer[2] != '\0' || buffer[3] != '\0')
+ goto fail;
+
+ num_sections = le16(buffer + 6);
+ opt_header_size = le16(buffer + 20);
+ offset += 24; /* skip file header */
+
+ /*
+ * Validate magic number 0x10b or 0x20b, for full details see
+ * https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#optional-header-standard-fields-image-only
+ */
+ if (read_at(fd, buffer, offset, 2) < 0 ||
+ ((i = le16(buffer)) != 0x10b && i != 0x20b))
+ goto fail;
+
+ offset += opt_header_size;
+
+ for (i = 0; i < num_sections; i++) {
+ if (read_at(fd, buffer, offset + i * 40, 40) < 0)
+ goto fail;
+
+ /*
+ * For full details about the section headers, see
+ * https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#section-table-section-headers
+ */
+ if ((le32(buffer + 36) /* characteristics */ & ~0x600000) /* IMAGE_SCN_ALIGN_32BYTES */ ==
+ (/* IMAGE_SCN_CNT_INITIALIZED_DATA */ 0x00000040 |
+ /* IMAGE_SCN_MEM_READ */ 0x40000000 |
+ /* IMAGE_SCN_MEM_WRITE */ 0x80000000)) {
+ size_t size = le32(buffer + 16); /* "SizeOfRawData " */
+ size_t pointer = le32(buffer + 20); /* "PointerToRawData " */
+
+ /*
+ * Skip the section if either size or pointer is 0, see
+ * https://github.com/golang/go/blob/go1.21.0/src/debug/buildinfo/buildinfo.go#L333
+ * for full details.
+ *
+ * Merely seeing a non-zero size will not actually do,
+ * though: the size must be at least `buildInfoSize`,
+ * i.e. 32, and we expect a UVarint (at least another
+ * byte) _and_ the bytes representing the string,
+ * which we expect to start with the letters "go" and
+ * continue with the Go version number.
+ */
+ if (size < 32 + 1 + 2 + 1 || !pointer)
+ continue;
+
+ p = malloc(size);
+
+ if (!p || read_at(fd, p, pointer, size) < 0)
+ goto fail;
+
+ /*
+ * Look for the build information embedded by Go, see
+ * https://github.com/golang/go/blob/go1.21.0/src/debug/buildinfo/buildinfo.go#L165-L175
+ * for full details.
+ *
+ * Note: Go contains code to enforce alignment along a
+ * 16-byte boundary. In practice, no `.exe` has been
+ * observed that required any adjustment, therefore
+ * this here code skips that logic for simplicity.
+ */
+ q = memmem(p, size - 18, "\xff Go buildinf:", 14);
+ if (!q)
+ goto fail;
+ /*
+ * Decode the build blob. For full details, see
+ * https://github.com/golang/go/blob/go1.21.0/src/debug/buildinfo/buildinfo.go#L177-L191
+ *
+ * Note: The `endianness` values observed in practice
+ * were always 2, therefore the complex logic to handle
+ * any other value is skipped for simplicity.
+ */
+ if ((q[14] == 8 || q[14] == 4) && q[15] == 2) {
+ /*
+ * Only handle a Go version string with fewer
+ * than 128 characters, so the Go UVarint at
+ * q[32] that indicates the string's length must
+ * be only one byte (without the high bit set).
+ */
+ if ((q[32] & 0x80) ||
+ !q[32] ||
+ (q + 33 + q[32] - p) > (ssize_t)size ||
+ q[32] + 1 > (ssize_t)go_version_size)
+ goto fail;
+ res = q[32];
+ memcpy(go_version, q + 33, res);
+ go_version[res] = '\0';
+ break;
+ }
+ }
+ }
+
+fail:
+ free(p);
+ close(fd);
+ return res;
+}
+
+void win32_warn_about_git_lfs_on_windows7(int exit_code, const char *argv0)
+{
+ char buffer[128], *git_lfs = NULL;
+ const char *p;
+
+ /*
+ * Git LFS v3.5.1 fails with an Access Violation on Windows 7; That
+ * would usually show up as an exit code 0xc0000005. For some reason
+ * (probably because at this point, we no longer have the _original_
+ * HANDLE that was returned by `CreateProcess()`) we observe other
+ * values like 0xb00 and 0x2 instead. Since the exact exit code
+ * seems to be inconsistent, we check for a non-zero exit status.
+ */
+ if (exit_code == 0)
+ return;
+ if (GetVersion() >> 16 > 7601)
+ return; /* Warn only on Windows 7 or older */
+ if (!istarts_with(argv0, "git-lfs ") &&
+ strcasecmp(argv0, "git-lfs"))
+ return;
+ if (!(git_lfs = locate_in_PATH("git-lfs")))
+ return;
+ if (get_go_version(git_lfs, buffer, sizeof(buffer)) > 0 &&
+ skip_prefix(buffer, "go", &p) &&
+ versioncmp("1.21.0", p) <= 0)
+ warning("This program was built with Go v%s\n"
+ "i.e. without support for this Windows version:\n"
+ "\n\t%s\n"
+ "\n"
+ "To work around this, you can download and install a "
+ "working version from\n"
+ "\n"
+ "\thttps://github.com/git-lfs/git-lfs/releases/tag/"
+ "v3.4.1\n",
+ p, git_lfs);
+ free(git_lfs);
+}
diff --git a/compat/win32/path-utils.h b/compat/win32/path-utils.h
index a561c700e75713..a69483c332c1a7 100644
--- a/compat/win32/path-utils.h
+++ b/compat/win32/path-utils.h
@@ -34,4 +34,7 @@ int win32_fspathcmp(const char *a, const char *b);
int win32_fspathncmp(const char *a, const char *b, size_t count);
#define fspathncmp win32_fspathncmp
+void win32_warn_about_git_lfs_on_windows7(int exit_code, const char *argv0);
+#define warn_about_git_lfs_on_windows7 win32_warn_about_git_lfs_on_windows7
+
#endif
diff --git a/compat/win32/pthread.c b/compat/win32/pthread.c
index 7e93146963ec56..dcdf537cce680b 100644
--- a/compat/win32/pthread.c
+++ b/compat/win32/pthread.c
@@ -21,8 +21,8 @@ static unsigned __stdcall win32_start_routine(void *arg)
return 0;
}
-int pthread_create(pthread_t *thread, const void *attr UNUSED,
- void *(*start_routine)(void *), void *arg)
+int win32_pthread_create(pthread_t *thread, const void *attr UNUSED,
+ void *(*start_routine)(void *), void *arg)
{
thread->arg = arg;
thread->start_routine = start_routine;
@@ -53,7 +53,7 @@ int win32_pthread_join(pthread_t *thread, void **value_ptr)
}
}
-pthread_t pthread_self(void)
+pthread_t win32_pthread_self(void)
{
pthread_t t = { NULL };
t.tid = GetCurrentThreadId();
diff --git a/compat/win32/pthread.h b/compat/win32/pthread.h
index ccacc5a53ba976..51a3eefac8e9b4 100644
--- a/compat/win32/pthread.h
+++ b/compat/win32/pthread.h
@@ -49,8 +49,9 @@ typedef struct {
DWORD tid;
} pthread_t;
-int pthread_create(pthread_t *thread, const void *unused,
- void *(*start_routine)(void*), void *arg);
+int win32_pthread_create(pthread_t *thread, const void *unused,
+ void *(*start_routine)(void*), void *arg);
+#define pthread_create win32_pthread_create
/*
* To avoid the need of copying a struct, we use small macro wrapper to pass
@@ -61,7 +62,8 @@ int pthread_create(pthread_t *thread, const void *unused,
int win32_pthread_join(pthread_t *thread, void **value_ptr);
#define pthread_equal(t1, t2) ((t1).tid == (t2).tid)
-pthread_t pthread_self(void);
+pthread_t win32_pthread_self(void);
+#define pthread_self win32_pthread_self
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
diff --git a/compat/win32/wsl.c b/compat/win32/wsl.c
new file mode 100644
index 00000000000000..ab599770138b4e
--- /dev/null
+++ b/compat/win32/wsl.c
@@ -0,0 +1,142 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#include "../../git-compat-util.h"
+#include "../win32.h"
+#include "../../repository.h"
+#include "config.h"
+#include "ntifs.h"
+#include "wsl.h"
+
+int are_wsl_compatible_mode_bits_enabled(void)
+{
+ /* default to `false` during initialization */
+ static const int fallback = 0;
+ static int enabled = -1;
+
+ if (enabled < 0) {
+ /* avoid infinite recursion */
+ if (!the_repository)
+ return fallback;
+
+ if (the_repository->config &&
+ the_repository->config->hash_initialized &&
+ repo_config_get_bool(the_repository, "core.wslcompat", &enabled) < 0)
+ enabled = 0;
+ }
+
+ return enabled < 0 ? fallback : enabled;
+}
+
+int copy_wsl_mode_bits_from_disk(const wchar_t *wpath, ssize_t wpathlen,
+ _mode_t *mode)
+{
+ int ret = -1;
+ HANDLE h;
+ if (wpathlen >= 0) {
+ /*
+ * It's the caller's duty to make sure wpathlen is reasonable so
+ * it does not overflow.
+ */
+ wchar_t *fn2 = (wchar_t*)alloca((wpathlen + 1) * sizeof(wchar_t));
+ memcpy(fn2, wpath, wpathlen * sizeof(wchar_t));
+ fn2[wpathlen] = 0;
+ wpath = fn2;
+ }
+ h = CreateFileW(wpath, FILE_READ_EA | SYNCHRONIZE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL, OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS |
+ FILE_FLAG_OPEN_REPARSE_POINT,
+ NULL);
+ if (h != INVALID_HANDLE_VALUE) {
+ ret = get_wsl_mode_bits_by_handle(h, mode);
+ CloseHandle(h);
+ }
+ return ret;
+}
+
+#ifndef LX_FILE_METADATA_HAS_UID
+#define LX_FILE_METADATA_HAS_UID 0x1
+#define LX_FILE_METADATA_HAS_GID 0x2
+#define LX_FILE_METADATA_HAS_MODE 0x4
+#define LX_FILE_METADATA_HAS_DEVICE_ID 0x8
+#define LX_FILE_CASE_SENSITIVE_DIR 0x10
+typedef struct _FILE_STAT_LX_INFORMATION {
+ LARGE_INTEGER FileId;
+ LARGE_INTEGER CreationTime;
+ LARGE_INTEGER LastAccessTime;
+ LARGE_INTEGER LastWriteTime;
+ LARGE_INTEGER ChangeTime;
+ LARGE_INTEGER AllocationSize;
+ LARGE_INTEGER EndOfFile;
+ uint32_t FileAttributes;
+ uint32_t ReparseTag;
+ uint32_t NumberOfLinks;
+ ACCESS_MASK EffectiveAccess;
+ uint32_t LxFlags;
+ uint32_t LxUid;
+ uint32_t LxGid;
+ uint32_t LxMode;
+ uint32_t LxDeviceIdMajor;
+ uint32_t LxDeviceIdMinor;
+} FILE_STAT_LX_INFORMATION, *PFILE_STAT_LX_INFORMATION;
+#endif
+
+/*
+ * This struct is extended from the original FILE_FULL_EA_INFORMATION of
+ * Microsoft Windows.
+ */
+struct wsl_full_ea_info_t {
+ uint32_t NextEntryOffset;
+ uint8_t Flags;
+ uint8_t EaNameLength;
+ uint16_t EaValueLength;
+ char EaName[7];
+ char EaValue[4];
+ char Padding[1];
+};
+
+enum {
+ FileStatLxInformation = 70,
+};
+__declspec(dllimport) NTSTATUS WINAPI
+ NtQueryInformationFile(HANDLE FileHandle,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ PVOID FileInformation, ULONG Length,
+ uint32_t FileInformationClass);
+__declspec(dllimport) NTSTATUS WINAPI
+ NtSetInformationFile(HANDLE FileHandle, PIO_STATUS_BLOCK IoStatusBlock,
+ PVOID FileInformation, ULONG Length,
+ uint32_t FileInformationClass);
+__declspec(dllimport) NTSTATUS WINAPI
+ NtSetEaFile(HANDLE FileHandle, PIO_STATUS_BLOCK IoStatusBlock,
+ PVOID EaBuffer, ULONG EaBufferSize);
+
+int set_wsl_mode_bits_by_handle(HANDLE h, _mode_t mode)
+{
+ uint32_t value = mode;
+ struct wsl_full_ea_info_t ea_info;
+ IO_STATUS_BLOCK iob;
+ /* mode should be valid to make WSL happy */
+ assert(S_ISREG(mode) || S_ISDIR(mode));
+ ea_info.NextEntryOffset = 0;
+ ea_info.Flags = 0;
+ ea_info.EaNameLength = 6;
+ ea_info.EaValueLength = sizeof(value); /* 4 */
+ strlcpy(ea_info.EaName, "$LXMOD", sizeof(ea_info.EaName));
+ memcpy(ea_info.EaValue, &value, sizeof(value));
+ ea_info.Padding[0] = 0;
+ return NtSetEaFile(h, &iob, &ea_info, sizeof(ea_info));
+}
+
+int get_wsl_mode_bits_by_handle(HANDLE h, _mode_t *mode)
+{
+ FILE_STAT_LX_INFORMATION fxi;
+ IO_STATUS_BLOCK iob;
+ if (NtQueryInformationFile(h, &iob, &fxi, sizeof(fxi),
+ FileStatLxInformation) == 0) {
+ if (fxi.LxFlags & LX_FILE_METADATA_HAS_MODE)
+ *mode = (_mode_t)fxi.LxMode;
+ return 0;
+ }
+ return -1;
+}
diff --git a/compat/win32/wsl.h b/compat/win32/wsl.h
new file mode 100644
index 00000000000000..1f5ad7e67a4fc2
--- /dev/null
+++ b/compat/win32/wsl.h
@@ -0,0 +1,12 @@
+#ifndef COMPAT_WIN32_WSL_H
+#define COMPAT_WIN32_WSL_H
+
+int are_wsl_compatible_mode_bits_enabled(void);
+
+int copy_wsl_mode_bits_from_disk(const wchar_t *wpath, ssize_t wpathlen,
+ _mode_t *mode);
+
+int get_wsl_mode_bits_by_handle(HANDLE h, _mode_t *mode);
+int set_wsl_mode_bits_by_handle(HANDLE h, _mode_t mode);
+
+#endif
diff --git a/compat/winansi.c b/compat/winansi.c
index ac2ffb78691a7d..0e0baf36e7ec7d 100644
--- a/compat/winansi.c
+++ b/compat/winansi.c
@@ -575,6 +575,9 @@ static void detect_msys_tty(int fd)
if (!NT_SUCCESS(NtQueryObject(h, ObjectNameInformation,
buffer, sizeof(buffer) - 2, &result)))
return;
+ if (result < sizeof(*nameinfo) || !nameinfo->Name.Buffer ||
+ !nameinfo->Name.Length)
+ return;
name = nameinfo->Name.Buffer;
name[nameinfo->Name.Length / sizeof(*name)] = 0;
@@ -593,6 +596,49 @@ static void detect_msys_tty(int fd)
#endif
+static HANDLE std_console_handle;
+static DWORD std_console_mode = ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+static UINT std_console_code_page = CP_UTF8;
+
+static void reset_std_console(void)
+{
+ if (std_console_mode != ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ SetConsoleMode(std_console_handle, std_console_mode);
+ if (std_console_code_page != CP_UTF8)
+ SetConsoleOutputCP(std_console_code_page);
+}
+
+static int enable_virtual_processing(void)
+{
+ std_console_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+ if (std_console_handle == INVALID_HANDLE_VALUE ||
+ !GetConsoleMode(std_console_handle, &std_console_mode)) {
+ std_console_handle = GetStdHandle(STD_ERROR_HANDLE);
+ if (std_console_handle == INVALID_HANDLE_VALUE ||
+ !GetConsoleMode(std_console_handle, &std_console_mode))
+ return 0;
+ }
+
+ std_console_code_page = GetConsoleOutputCP();
+ if (std_console_code_page != CP_UTF8)
+ SetConsoleOutputCP(CP_UTF8);
+ if (!std_console_code_page)
+ std_console_code_page = CP_UTF8;
+
+ atexit(reset_std_console);
+
+ if (std_console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ return 1;
+
+ if (!SetConsoleMode(std_console_handle,
+ std_console_mode |
+ ENABLE_PROCESSED_OUTPUT |
+ ENABLE_VIRTUAL_TERMINAL_PROCESSING))
+ return 0;
+
+ return 1;
+}
+
/*
* Wrapper for isatty(). Most calls in the main git code
* call isatty(1 or 2) to see if the instance is interactive
@@ -631,6 +677,9 @@ void winansi_init(void)
return;
}
+ if (enable_virtual_processing())
+ return;
+
/* create a named pipe to communicate with the console thread */
if (swprintf(name, ARRAY_SIZE(name) - 1, L"\\\\.\\pipe\\winansi%lu",
GetCurrentProcessId()) < 0)
diff --git a/config.mak.dev b/config.mak.dev
index c8dcf78779e60b..9123a287921602 100644
--- a/config.mak.dev
+++ b/config.mak.dev
@@ -22,8 +22,10 @@ endif
ifneq ($(uname_S),FreeBSD)
ifneq ($(or $(filter gcc6,$(COMPILER_FEATURES)),$(filter clang7,$(COMPILER_FEATURES))),)
+ifndef USE_MIMALLOC
DEVELOPER_CFLAGS += -std=gnu99
endif
+endif
else
# FreeBSD cannot limit to C99 because its system headers unconditionally
# rely on C11 features.
diff --git a/config.mak.uname b/config.mak.uname
index ccb3f718812740..3ebfa2cb5c4a0a 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -162,6 +162,17 @@ ifeq ($(uname_S),Darwin)
NEEDS_GOOD_LIBICONV = UnfortunatelyYes
endif
+ # Homebrew's LLVM clang ships a regex.h that lacks REG_ENHANCED,
+ # which is needed for USE_ENHANCED_BASIC_REGULAR_EXPRESSIONS above.
+ # Use our bundled regex instead. This became a practical problem
+ # when Homebrew 5.1.0 started auto-linking versioned keg-only
+ # formulae (like llvm@15) into $(HOMEBREW_PREFIX)/bin/, causing
+ # CC=clang in CI to silently pick up Homebrew's clang instead of
+ # Apple's /usr/bin/clang.
+ ifeq ($(CC),clang)
+ NO_REGEX = HomebrewsClangUsesARegexThatLacksREG_ENHANCED
+ endif
+
# The builtin FSMonitor on MacOS builds upon Simple-IPC. Both require
# Unix domain sockets and PThreads.
ifndef NO_PTHREADS
@@ -441,14 +452,8 @@ ifeq ($(uname_S),Windows)
GIT_VERSION := $(GIT_VERSION).MSVC
pathsep = ;
# Assume that this is built in Git for Windows' SDK
- ifeq (MINGW32,$(MSYSTEM))
- prefix = /mingw32
- else
- ifeq (CLANGARM64,$(MSYSTEM))
- prefix = /clangarm64
- else
- prefix = /mingw64
- endif
+ ifneq (,$(MSYSTEM))
+ prefix = $(MINGW_PREFIX)
endif
# Prepend MSVC 64-bit tool-chain to PATH.
#
@@ -456,7 +461,7 @@ ifeq ($(uname_S),Windows)
# link.exe next to, and required by, cl.exe, we have to prepend this
# onto the existing $PATH.
#
- SANE_TOOL_PATH ?= $(msvc_bin_dir_msys)
+ SANE_TOOL_PATH ?= $(msvc_bin_dir_msys):$(sdk_ver_bin_dir_msys)
HAVE_ALLOCA_H = YesPlease
NO_PREAD = YesPlease
NO_WRITEV = YesPlease
@@ -502,7 +507,8 @@ ifeq ($(uname_S),Windows)
NO_POSIX_GOODIES = UnfortunatelyYes
NATIVE_CRLF = YesPlease
DEFAULT_HELP_FORMAT = html
-ifeq (/mingw64,$(subst 32,64,$(subst clangarm,mingw,$(prefix))))
+ SKIP_DASHED_BUILT_INS = YabbaDabbaDoo
+ifneq (,$(MINGW_PREFIX))
# Move system config into top-level /etc/
ETC_GITCONFIG = ../etc/gitconfig
ETC_GITATTRIBUTES = ../etc/gitattributes
@@ -511,20 +517,24 @@ endif
CC = compat/vcbuild/scripts/clink.pl
AR = compat/vcbuild/scripts/lib.pl
CFLAGS =
- BASIC_CFLAGS = -nologo -I. -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE
+ BASIC_CFLAGS = -nologo -I. -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE -MP -std:c11
COMPAT_OBJS = compat/msvc.o compat/winansi.o \
compat/win32/flush.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/trace2_win32_process_info.o \
- compat/win32/dirent.o
- COMPAT_CFLAGS = -D__USE_MINGW_ACCESS -DDETECT_MSYS_TTY -DNOGDI -DHAVE_STRING_H -Icompat -Icompat/regex -Icompat/win32 -DSTRIP_EXTENSION=\".exe\"
- BASIC_LDFLAGS = -IGNORE:4217 -IGNORE:4049 -NOLOGO -ENTRY:wmainCRTStartup -SUBSYSTEM:CONSOLE
+ compat/win32/dirent.o compat/win32/fscache.o compat/win32/wsl.o
+ COMPAT_CFLAGS = -D__USE_MINGW_ACCESS -DDETECT_MSYS_TTY \
+ -DENSURE_MSYSTEM_IS_SET="\"$(MSYSTEM)\"" -DMINGW_PREFIX="\"$(patsubst /%,%,$(MINGW_PREFIX))\"" \
+ -DNOGDI -DHAVE_STRING_H -Icompat -Icompat/regex -Icompat/win32 -DSTRIP_EXTENSION=\".exe\"
+ BASIC_LDFLAGS = -IGNORE:4217 -IGNORE:4049 -NOLOGO
# invalidcontinue.obj allows Git's source code to close the same file
# handle twice, or to access the osfhandle of an already-closed stdout
# See https://msdn.microsoft.com/en-us/library/ms235330.aspx
EXTLIBS = user32.lib advapi32.lib shell32.lib wininet.lib ws2_32.lib invalidcontinue.obj kernel32.lib ntdll.lib
+ GITLIBS += git.res
PTHREAD_LIBS =
+ RC = compat/vcbuild/scripts/rc.pl
lib =
BASIC_CFLAGS += $(vcpkg_inc) $(sdk_includes) $(msvc_includes)
ifndef DEBUG
@@ -695,6 +705,7 @@ ifeq ($(uname_S),MINGW)
FSMONITOR_DAEMON_BACKEND = win32
FSMONITOR_OS_SETTINGS = win32
+ SKIP_DASHED_BUILT_INS = YabbaDabbaDoo
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
@@ -709,7 +720,8 @@ ifeq ($(uname_S),MINGW)
DEFAULT_HELP_FORMAT = html
HAVE_PLATFORM_PROCINFO = YesPlease
CSPRNG_METHOD = rtlgenrandom
- BASIC_LDFLAGS += -municode
+ BASIC_LDFLAGS += -municode -Wl,--tsaware
+ LAZYLOAD_LIBCURL = YesDoThatPlease
COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
@@ -717,7 +729,7 @@ ifeq ($(uname_S),MINGW)
compat/win32/flush.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
- compat/win32/dirent.o
+ compat/win32/dirent.o compat/win32/fscache.o compat/win32/wsl.o
BASIC_CFLAGS += -DWIN32
EXTLIBS += -lws2_32
GITLIBS += git.res
@@ -733,26 +745,25 @@ ifeq ($(uname_S),MINGW)
ifneq (,$(findstring -O,$(filter-out -O0 -Og,$(CFLAGS))))
BASIC_LDFLAGS += -Wl,--dynamicbase
endif
- ifeq (MINGW32,$(MSYSTEM))
- prefix = /mingw32
- HOST_CPU = i686
- BASIC_LDFLAGS += -Wl,--pic-executable,-e,_mainCRTStartup
- endif
- ifeq (MINGW64,$(MSYSTEM))
- prefix = /mingw64
- HOST_CPU = x86_64
- BASIC_LDFLAGS += -Wl,--pic-executable,-e,mainCRTStartup
- else ifeq (CLANGARM64,$(MSYSTEM))
- prefix = /clangarm64
- HOST_CPU = aarch64
- BASIC_LDFLAGS += -Wl,--pic-executable,-e,mainCRTStartup
- else
- COMPAT_CFLAGS += -D_USE_32BIT_TIME_T
- BASIC_LDFLAGS += -Wl,--large-address-aware
+ ifneq (,$(MSYSTEM))
+ ifeq ($(MINGW_PREFIX),$(filter-out /%,$(MINGW_PREFIX)))
+ # Override if empty or does not start with a slash
+ MINGW_PREFIX := /$(shell echo '$(MSYSTEM)' | tr A-Z a-z)
+ endif
+ prefix = $(MINGW_PREFIX)
+ HOST_CPU = $(patsubst %-w64-mingw32,%,$(MINGW_CHOST))
+ BASIC_LDFLAGS += -Wl,--pic-executable
+ COMPAT_CFLAGS += -DDETECT_MSYS_TTY \
+ -DENSURE_MSYSTEM_IS_SET="\"$(MSYSTEM)\"" \
+ -DMINGW_PREFIX="\"$(patsubst /%,%,$(MINGW_PREFIX))\""
+ ifeq (MINGW32,$(MSYSTEM))
+ BASIC_LDFLAGS += -Wl,--large-address-aware
+ endif
+ # Move system config into top-level /etc/
+ ETC_GITCONFIG = ../etc/gitconfig
+ ETC_GITATTRIBUTES = ../etc/gitattributes
endif
- CC = gcc
- COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0 -DDETECT_MSYS_TTY \
- -fstack-protector-strong
+ COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0 -fstack-protector-strong
EXTLIBS += -lntdll
EXTRA_PROGRAMS += headless-git$X
INSTALL = /bin/install
@@ -760,14 +771,8 @@ ifeq ($(uname_S),MINGW)
HAVE_LIBCHARSET_H = YesPlease
USE_GETTEXT_SCHEME = fallthrough
USE_LIBPCRE = YesPlease
- ifneq (CLANGARM64,$(MSYSTEM))
- USE_NED_ALLOCATOR = YesPlease
- endif
- ifeq (/mingw64,$(subst 32,64,$(subst clangarm,mingw,$(prefix))))
- # Move system config into top-level /etc/
- ETC_GITCONFIG = ../etc/gitconfig
- ETC_GITATTRIBUTES = ../etc/gitattributes
- endif
+ USE_MIMALLOC = YesPlease
+ NO_PYTHON =
endif
ifeq ($(uname_S),QNX)
COMPAT_CFLAGS += -DSA_RESTART=0
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 81b4306e72046c..b6053e7c75f71f 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -14,6 +14,11 @@ Note: Visual Studio also has the option of opening `CMakeLists.txt`
directly; Using this option, Visual Studio will not find the source code,
though, therefore the `File>Open>Folder...` option is preferred.
+Visual Studio does not produce a .sln solution file nor the .vcxproj files
+that may be required by VS extension tools.
+
+To generate the .sln/.vcxproj files run CMake manually, as described below.
+
Instructions to run CMake manually:
mkdir -p contrib/buildsystems/out
@@ -22,7 +27,7 @@ Instructions to run CMake manually:
This will build the git binaries in contrib/buildsystems/out
directory (our top-level .gitignore file knows to ignore contents of
-this directory).
+this directory). The project .sln and .vcxproj files are also generated.
Possible build configurations(-DCMAKE_BUILD_TYPE) with corresponding
compiler flags
@@ -35,17 +40,16 @@ empty(default) :
NOTE: -DCMAKE_BUILD_TYPE is optional. For multi-config generators like Visual Studio
this option is ignored
-This process generates a Makefile(Linux/*BSD/MacOS) , Visual Studio solution(Windows) by default.
+This process generates a Makefile(Linux/*BSD/MacOS), Visual Studio solution(Windows) by default.
Run `make` to build Git on Linux/*BSD/MacOS.
Open git.sln on Windows and build Git.
-NOTE: By default CMake uses Makefile as the build tool on Linux and Visual Studio in Windows,
-to use another tool say `ninja` add this to the command line when configuring.
-`-G Ninja`
-
NOTE: By default CMake will install vcpkg locally to your source tree on configuration,
to avoid this, add `-DNO_VCPKG=TRUE` to the command line when configuring.
+The Visual Studio default generator changed in v16.6 from its Visual Studio
+implementation to `Ninja`. This required changes to many CMake scripts.
+
]]
cmake_minimum_required(VERSION 3.14)
@@ -59,15 +63,29 @@ endif()
if(NOT DEFINED CMAKE_EXPORT_COMPILE_COMMANDS)
set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
+ message("settting CMAKE_EXPORT_COMPILE_COMMANDS: ${CMAKE_EXPORT_COMPILE_COMMANDS}")
endif()
if(USE_VCPKG)
set(VCPKG_DIR "${CMAKE_SOURCE_DIR}/compat/vcbuild/vcpkg")
+ message("WIN32: ${WIN32}") # show its underlying text values
+ message("VCPKG_DIR: ${VCPKG_DIR}")
+ message("VCPKG_ARCH: ${VCPKG_ARCH}") # maybe unset
+ message("MSVC: ${MSVC}")
+ message("CMAKE_GENERATOR: ${CMAKE_GENERATOR}")
+ message("CMAKE_CXX_COMPILER_ID: ${CMAKE_CXX_COMPILER_ID}")
+ message("CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}")
+ message("CMAKE_EXPORT_COMPILE_COMMANDS: ${CMAKE_EXPORT_COMPILE_COMMANDS}")
+ message("ENV(CMAKE_EXPORT_COMPILE_COMMANDS): $ENV{CMAKE_EXPORT_COMPILE_COMMANDS}")
if(NOT EXISTS ${VCPKG_DIR})
message("Initializing vcpkg and building the Git's dependencies (this will take a while...)")
- execute_process(COMMAND ${CMAKE_SOURCE_DIR}/compat/vcbuild/vcpkg_install.bat)
+ execute_process(COMMAND ${CMAKE_SOURCE_DIR}/compat/vcbuild/vcpkg_install.bat ${VCPKG_ARCH})
+ endif()
+ if(NOT EXISTS ${VCPKG_ARCH})
+ message("VCPKG_ARCH: unset, using 'x64-windows'")
+ set(VCPKG_ARCH "x64-windows") # default from vcpkg_install.bat
endif()
- list(APPEND CMAKE_PREFIX_PATH "${VCPKG_DIR}/installed/x64-windows")
+ list(APPEND CMAKE_PREFIX_PATH "${VCPKG_DIR}/installed/${VCPKG_ARCH}")
# In the vcpkg edition, we need this to be able to link to libcurl
set(CURL_NO_CURL_CMAKE ON)
@@ -208,11 +226,19 @@ if(CMAKE_C_COMPILER_ID STREQUAL "MSVC")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR})
add_compile_options(/MP /std:c11)
+ add_link_options(/MANIFEST:NO)
endif()
#default behaviour
include_directories(${CMAKE_SOURCE_DIR})
-add_compile_definitions(GIT_HOST_CPU="${CMAKE_SYSTEM_PROCESSOR}")
+
+# When cross-compiling, define HOST_CPU as the canonical name of the CPU on
+# which the built Git will run (for instance "x86_64").
+if(NOT HOST_CPU)
+ add_compile_definitions(GIT_HOST_CPU="${CMAKE_SYSTEM_PROCESSOR}")
+else()
+ add_compile_definitions(GIT_HOST_CPU="${HOST_CPU}")
+endif()
add_compile_definitions(SHA256_BLK INTERNAL_QSORT RUNTIME_PREFIX)
add_compile_definitions(NO_OPENSSL SHA1_DC SHA1DC_NO_STANDARD_INCLUDES
SHA1DC_INIT_SAFE_HASH_DEFAULT=0
@@ -256,7 +282,14 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
_CONSOLE DETECT_MSYS_TTY STRIP_EXTENSION=".exe" NO_SYMLINK_HEAD UNRELIABLE_FSTAT
NOGDI OBJECT_CREATION_MODE=1 __USE_MINGW_ANSI_STDIO=0
USE_NED_ALLOCATOR OVERRIDE_STRDUP MMAP_PREVENTS_DELETE USE_WIN32_MMAP
- HAVE_WPGMPTR ENSURE_MSYSTEM_IS_SET HAVE_RTLGENRANDOM)
+ HAVE_WPGMPTR HAVE_RTLGENRANDOM)
+ if(CMAKE_GENERATOR_PLATFORM STREQUAL "x64")
+ add_compile_definitions(ENSURE_MSYSTEM_IS_SET="MINGW64" MINGW_PREFIX="mingw64")
+ elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "arm64")
+ add_compile_definitions(ENSURE_MSYSTEM_IS_SET="CLANGARM64" MINGW_PREFIX="clangarm64")
+ elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "x86")
+ add_compile_definitions(ENSURE_MSYSTEM_IS_SET="MINGW32" MINGW_PREFIX="mingw32")
+ endif()
list(APPEND compat_SOURCES
compat/mingw.c
compat/winansi.c
@@ -267,8 +300,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
compat/win32/syslog.c
compat/win32/trace2_win32_process_info.c
compat/win32/dirent.c
+ compat/win32/wsl.c
compat/nedmalloc/nedmalloc.c
- compat/strdup.c)
+ compat/strdup.c
+ compat/win32/fscache.c)
set(NO_UNIX_SOCKETS 1)
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
@@ -731,6 +766,7 @@ if(WIN32)
endif()
add_executable(headless-git ${CMAKE_SOURCE_DIR}/compat/win32/headless.c)
+ list(APPEND PROGRAMS_BUILT headless-git)
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_C_COMPILER_ID STREQUAL "Clang")
target_link_options(headless-git PUBLIC -municode -Wl,-subsystem,windows)
elseif(CMAKE_C_COMPILER_ID STREQUAL "MSVC")
@@ -931,7 +967,7 @@ list(TRANSFORM git_perl_scripts PREPEND "${CMAKE_BINARY_DIR}/")
#install
foreach(program ${PROGRAMS_BUILT})
-if(program MATCHES "^(git|git-shell|scalar)$")
+if(program MATCHES "^(git|git-shell|headless-git|scalar)$")
install(TARGETS ${program}
RUNTIME DESTINATION bin)
else()
@@ -1199,7 +1235,7 @@ string(REPLACE "@USE_LIBPCRE2@" "" git_build_options "${git_build_options}")
string(REPLACE "@WITH_BREAKING_CHANGES@" "" git_build_options "${git_build_options}")
string(REPLACE "@X@" "${EXE_EXTENSION}" git_build_options "${git_build_options}")
if(USE_VCPKG)
- string(APPEND git_build_options "PATH=\"$PATH:$TEST_DIRECTORY/../compat/vcbuild/vcpkg/installed/x64-windows/bin\"\n")
+ string(APPEND git_build_options "PATH=\"$PATH:$TEST_DIRECTORY/../compat/vcbuild/vcpkg/installed/${VCPKG_ARCH}/bin\"\n")
endif()
file(WRITE ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS ${git_build_options})
diff --git a/contrib/subtree/Makefile b/contrib/subtree/Makefile
index c0c9f21cb78022..dab2dfc08ee222 100644
--- a/contrib/subtree/Makefile
+++ b/contrib/subtree/Makefile
@@ -95,7 +95,7 @@ $(GIT_SUBTREE_TEST): $(GIT_SUBTREE)
cp $< $@
test: $(GIT_SUBTREE_TEST)
- $(MAKE) -C t/ test
+ $(MAKE) -C t/ all
clean:
$(RM) $(GIT_SUBTREE)
diff --git a/credential.c b/credential.c
index 2594c0c4229ba0..af964189363b28 100644
--- a/credential.c
+++ b/credential.c
@@ -360,6 +360,9 @@ int credential_read(struct credential *c, FILE *fp,
credential_set_capability(&c->capa_authtype, op_type);
else if (!strcmp(value, "state"))
credential_set_capability(&c->capa_state, op_type);
+ } else if (!strcmp(key, "ntlm")) {
+ if (!strcmp(value, "allow"))
+ c->ntlm_allow = 1;
} else if (!strcmp(key, "continue")) {
c->multistage = !!git_config_bool("continue", value);
} else if (!strcmp(key, "password_expiry_utc")) {
@@ -420,6 +423,8 @@ void credential_write(const struct credential *c, FILE *fp,
if (c->ephemeral)
credential_write_item(c, fp, "ephemeral", "1", 0);
}
+ if (c->ntlm_suppressed)
+ credential_write_item(c, fp, "ntlm", "suppressed", 0);
credential_write_item(c, fp, "protocol", c->protocol, 1);
credential_write_item(c, fp, "host", c->host, 1);
credential_write_item(c, fp, "path", c->path, 0);
diff --git a/credential.h b/credential.h
index c78b72d110eaac..95244d5375dfe9 100644
--- a/credential.h
+++ b/credential.h
@@ -177,6 +177,9 @@ struct credential {
struct credential_capability capa_authtype;
struct credential_capability capa_state;
+ unsigned ntlm_suppressed:1,
+ ntlm_allow:1;
+
char *username;
char *password;
char *credential;
diff --git a/dir.c b/dir.c
index fcb8f6dd2aa969..11ea2ce044096c 100644
--- a/dir.c
+++ b/dir.c
@@ -1156,16 +1156,64 @@ static int add_patterns(const char *fname, const char *base, int baselen,
size_t size = 0;
char *buf;
- if (flags & PATTERN_NOFOLLOW)
- fd = open_nofollow(fname, O_RDONLY);
- else
- fd = open(fname, O_RDONLY);
-
- if (fd < 0 || fstat(fd, &st) < 0) {
- if (fd < 0)
- warn_on_fopen_errors(fname);
+ /*
+ * A performance optimization for status.
+ *
+ * During a status scan, git looks in each directory for a .gitignore
+ * file before scanning the directory. Since .gitignore files are not
+ * that common, we can waste a lot of time looking for files that are
+ * not there. Fortunately, the fscache already knows if the directory
+ * contains a .gitignore file, since it has already read the directory
+ * and it already has the stat-data.
+ *
+ * If the fscache is enabled, use the fscache-lstat() interlude to see
+ * if the file exists (in the fscache hash maps) before trying to open()
+ * it.
+ *
+ * This causes problem when the .gitignore file is a symlink, because
+ * we call lstat() rather than stat() on the symlink and the resulting
+ * stat-data is for the symlink itself rather than the target file.
+ * We CANNOT use stat() here because the fscache DOES NOT install an
+ * interlude for stat() and mingw_stat() always calls "open-fstat-close"
+ * on the file and defeats the purpose of the optimization here. Since
+ * symlinks are even more rare than .gitignore files, we force a fstat()
+ * after our open() to get stat-data for the target file.
+ *
+ * Since `clang`'s `-Wunreachable-code` mode is clever, it would figure
+ * out that on non-Windows platforms, this `lstat()` is unreachable.
+ * We do want to keep the conditional block for the sake of Windows,
+ * though, so let's use the `NOT_CONSTANT()` trick to suppress that error.
+ */
+ if (NOT_CONSTANT(is_fscache_enabled(fname))) {
+ if (lstat(fname, &st) < 0) {
+ fd = -1;
+ } else {
+ fd = open(fname, O_RDONLY);
+ if (fd < 0)
+ warn_on_fopen_errors(fname);
+ else if (S_ISLNK(st.st_mode) && fstat(fd, &st) < 0) {
+ warn_on_fopen_errors(fname);
+ close(fd);
+ fd = -1;
+ }
+ }
+ } else {
+ if (flags & PATTERN_NOFOLLOW)
+ fd = open_nofollow(fname, O_RDONLY);
else
- close(fd);
+ fd = open(fname, O_RDONLY);
+
+ if (fd < 0 || fstat(fd, &st) < 0) {
+ if (fd < 0)
+ warn_on_fopen_errors(fname);
+ else {
+ close(fd);
+ fd = -1;
+ }
+ }
+ }
+
+ if (fd < 0) {
if (!istate)
return -1;
r = read_skip_worktree_file_from_index(istate, fname,
@@ -3411,6 +3459,13 @@ static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up)
return 0;
}
+ if (is_mount_point(path)) {
+ /* Do not descend and nuke a mount point or junction. */
+ if (kept_up)
+ *kept_up = 1;
+ return 0;
+ }
+
flag &= ~REMOVE_DIR_KEEP_TOPLEVEL;
dir = opendir(path->buf);
if (!dir) {
diff --git a/editor.c b/editor.c
index fd174e6a034f1c..f6d960c6f30782 100644
--- a/editor.c
+++ b/editor.c
@@ -13,6 +13,7 @@
#include "strvec.h"
#include "run-command.h"
#include "sigchain.h"
+#include "compat/terminal.h"
#ifndef DEFAULT_EDITOR
#define DEFAULT_EDITOR "vi"
@@ -64,6 +65,7 @@ static int launch_specified_editor(const char *editor, const char *path,
return error("Terminal is dumb, but EDITOR unset");
if (strcmp(editor, ":")) {
+ int save_and_restore_term = !strcmp(editor, "vi") || !strcmp(editor, "vim");
struct strbuf realpath = STRBUF_INIT;
struct child_process p = CHILD_PROCESS_INIT;
int ret, sig;
@@ -92,7 +94,11 @@ static int launch_specified_editor(const char *editor, const char *path,
strvec_pushv(&p.env, (const char **)env);
p.use_shell = 1;
p.trace2_child_class = "editor";
+ if (save_and_restore_term)
+ save_and_restore_term = !save_term(1);
if (start_command(&p) < 0) {
+ if (save_and_restore_term)
+ restore_term();
strbuf_release(&realpath);
return error("unable to start editor '%s'", editor);
}
@@ -100,6 +106,8 @@ static int launch_specified_editor(const char *editor, const char *path,
sigchain_push(SIGINT, SIG_IGN);
sigchain_push(SIGQUIT, SIG_IGN);
ret = finish_command(&p);
+ if (save_and_restore_term)
+ restore_term();
strbuf_release(&realpath);
sig = ret - 128;
sigchain_pop(SIGINT);
diff --git a/entry.c b/entry.c
index 7817aee362ed9e..b299e3f1071ff6 100644
--- a/entry.c
+++ b/entry.c
@@ -324,7 +324,7 @@ static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca
if (!has_symlinks || to_tempfile)
goto write_file_entry;
- ret = symlink(new_blob, path);
+ ret = create_symlink(state->istate, new_blob, path);
free(new_blob);
if (ret)
return error_errno("unable to create symlink %s", path);
@@ -411,6 +411,9 @@ static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca
}
finish:
+ /* Flush cached lstat in fscache after writing to disk. */
+ flush_fscache();
+
if (state->refresh_cache) {
if (!fstat_done && lstat(ce->name, &st) < 0)
return error_errno("unable to stat just-written file %s",
diff --git a/fetch-pack.c b/fetch-pack.c
index a32224ed026592..b9532eab1289c1 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -764,6 +764,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
save_commit_buffer = 0;
trace2_region_enter("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
+ enable_fscache(0);
for (ref = *refs; ref; ref = ref->next) {
struct commit *commit;
@@ -788,6 +789,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
if (!cutoff || cutoff < commit->date)
cutoff = commit->date;
}
+ disable_fscache();
trace2_region_leave("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
/*
diff --git a/fsmonitor-settings.c b/fsmonitor-settings.c
index a6587a8972b184..b4c29f44a27827 100644
--- a/fsmonitor-settings.c
+++ b/fsmonitor-settings.c
@@ -5,6 +5,7 @@
#include "fsmonitor-ipc.h"
#include "fsmonitor-settings.h"
#include "fsmonitor-path-utils.h"
+#include "advice.h"
/*
* We keep this structure definition private and have getters
@@ -100,6 +101,31 @@ static struct fsmonitor_settings *alloc_settings(void)
return s;
}
+static int check_deprecated_builtin_config(struct repository *r)
+{
+ int core_use_builtin_fsmonitor = 0;
+
+ /*
+ * If 'core.useBuiltinFSMonitor' is set, print a deprecation warning
+ * suggesting the use of 'core.fsmonitor' instead. If the config is
+ * set to true, set the appropriate mode and return 1 indicating that
+ * the check resulted the config being set by this (deprecated) setting.
+ */
+ if(!repo_config_get_bool(r, "core.useBuiltinFSMonitor", &core_use_builtin_fsmonitor) &&
+ core_use_builtin_fsmonitor) {
+ if (!git_env_bool("GIT_SUPPRESS_USEBUILTINFSMONITOR_ADVICE", 0)) {
+ advise_if_enabled(ADVICE_USE_CORE_FSMONITOR_CONFIG,
+ _("core.useBuiltinFSMonitor=true is deprecated;"
+ "please set core.fsmonitor=true instead"));
+ setenv("GIT_SUPPRESS_USEBUILTINFSMONITOR_ADVICE", "1", 1);
+ }
+ fsm_settings__set_ipc(r);
+ return 1;
+ }
+
+ return 0;
+}
+
static void lookup_fsmonitor_settings(struct repository *r)
{
const char *const_str;
@@ -126,12 +152,16 @@ static void lookup_fsmonitor_settings(struct repository *r)
return;
case 1: /* config value was unset */
+ if (check_deprecated_builtin_config(r))
+ return;
+
const_str = getenv("GIT_TEST_FSMONITOR");
break;
case -1: /* config value set to an arbitrary string */
- if (repo_config_get_pathname(r, "core.fsmonitor", &to_free))
- return; /* should not happen */
+ if (check_deprecated_builtin_config(r) ||
+ repo_config_get_pathname(r, "core.fsmonitor", &to_free))
+ return;
const_str = to_free;
break;
diff --git a/git-compat-util.h b/git-compat-util.h
index 4b4ea2498f13ef..09933281c46b58 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -158,9 +158,11 @@ static inline int is_xplatform_dir_sep(int c)
/* pull in Windows compatibility stuff */
#include "compat/win32/path-utils.h"
#include "compat/mingw.h"
+#include "compat/win32/fscache.h"
#elif defined(_MSC_VER)
#include "compat/win32/path-utils.h"
#include "compat/msvc.h"
+#include "compat/win32/fscache.h"
#endif
/* used on Mac OS X */
@@ -261,6 +263,13 @@ static inline int git_offset_1st_component(const char *path)
#define fspathncmp git_fspathncmp
#endif
+#ifndef warn_about_git_lfs_on_windows7
+static inline void warn_about_git_lfs_on_windows7(int exit_code UNUSED,
+ const char *argv0 UNUSED)
+{
+}
+#endif
+
#ifndef is_valid_path
#define is_valid_path(path) 1
#endif
@@ -350,10 +359,28 @@ static inline int git_has_dir_sep(const char *path)
#define has_dir_sep(path) git_has_dir_sep(path)
#endif
+#ifndef is_mount_point
+#define is_mount_point is_mount_point_via_stat
+#endif
+
+#ifndef create_symlink
+struct index_state;
+static inline int git_create_symlink(struct index_state *index UNUSED,
+ const char *target, const char *link)
+{
+ return symlink(target, link);
+}
+#define create_symlink git_create_symlink
+#endif
+
#ifndef query_user_email
#define query_user_email() NULL
#endif
+#ifndef platform_strbuf_realpath
+#define platform_strbuf_realpath(resolved, path) NULL
+#endif
+
#ifdef __TANDEM
#include <floss.h(floss(d_ignore))>
#include <rld.h>
@@ -584,17 +611,23 @@ static inline bool strip_suffix(const char *str, const char *suffix,
* the stack overflow can occur.
*/
#define DEFAULT_MAX_ALLOWED_TREE_DEPTH 512
-#elif defined(GIT_WINDOWS_NATIVE) && defined(__clang__) && defined(__aarch64__)
+#elif defined(GIT_WINDOWS_NATIVE) && defined(__clang__)
/*
- * Similar to Visual C, it seems that on Windows/ARM64 the clang-based
- * builds have a smaller stack space available. When running out of
- * that stack space, a `STATUS_STACK_OVERFLOW` is produced. When the
+ * Similar to Visual C, it seems that clang-based builds on Windows
+ * have a smaller stack space available. When running out of that
+ * stack space, a `STATUS_STACK_OVERFLOW` is produced. When the
* Git command was run from an MSYS2 Bash, this unfortunately results
* in an exit code 127. Let's prevent that by lowering the maximal
- * tree depth; This value seems to be low enough.
+ * tree depth; Unfortunately, it seems that the exact limit differs
+ * for aarch64 vs x86_64, and the difference is too large to simply
+ * use a single limit.
*/
+#if defined(__aarch64__)
#define DEFAULT_MAX_ALLOWED_TREE_DEPTH 1280
#else
+#define DEFAULT_MAX_ALLOWED_TREE_DEPTH 1152
+#endif
+#else
#define DEFAULT_MAX_ALLOWED_TREE_DEPTH 2048
#endif
@@ -1025,6 +1058,45 @@ static inline int is_missing_file_error(int errno_)
return (errno_ == ENOENT || errno_ == ENOTDIR);
}
+/*
+ * Enable/disable a read-only cache for file system data on platforms that
+ * support it.
+ *
+ * Implementing a live-cache is complicated and requires special platform
+ * support (inotify, ReadDirectoryChangesW...). enable_fscache shall be used
+ * to mark sections of git code that extensively read from the file system
+ * without modifying anything. Implementations can use this to cache e.g. stat
+ * data or even file content without the need to synchronize with the file
+ * system.
+ */
+
+ /* opaque fscache structure */
+struct fscache;
+
+#ifndef enable_fscache
+#define enable_fscache(x) /* noop */
+#endif
+
+#ifndef disable_fscache
+#define disable_fscache() /* noop */
+#endif
+
+#ifndef is_fscache_enabled
+#define is_fscache_enabled(path) (0)
+#endif
+
+#ifndef flush_fscache
+#define flush_fscache() /* noop */
+#endif
+
+#ifndef getcache_fscache
+#define getcache_fscache() (NULL) /* noop */
+#endif
+
+#ifndef merge_fscache
+#define merge_fscache(dest) /* noop */
+#endif
+
int cmd_main(int, const char **);
/*
diff --git a/git-curl-compat.h b/git-curl-compat.h
index dccdd4d6e54158..5c8ceb076adea2 100644
--- a/git-curl-compat.h
+++ b/git-curl-compat.h
@@ -45,6 +45,14 @@
#define GIT_CURL_HAVE_CURLINFO_RETRY_AFTER 1
#endif
+/**
+ * CURLSSLOPT_AUTO_CLIENT_CERT was added in 7.77.0, released in May
+ * 2021.
+ */
+#if LIBCURL_VERSION_NUM >= 0x074d00
+#define GIT_CURL_HAVE_CURLSSLOPT_AUTO_CLIENT_CERT
+#endif
+
/**
* CURLOPT_PROTOCOLS_STR and CURLOPT_REDIR_PROTOCOLS_STR were added in 7.85.0,
* released in August 2022.
diff --git a/git-gui/git-gui--askyesno.sh b/git-gui/git-gui--askyesno.sh
index 142d1bc3de229b..e431f86a8e16ae 100755
--- a/git-gui/git-gui--askyesno.sh
+++ b/git-gui/git-gui--askyesno.sh
@@ -29,8 +29,8 @@ if {$argc < 1} {
}
${NS}::frame .t
-${NS}::label .t.m -text $prompt -justify center -width 40
-.t.m configure -wraplength 400
+${NS}::label .t.m -text $prompt -justify center -width 400px
+.t.m configure -wraplength 400px
pack .t.m -side top -fill x -padx 20 -pady 20 -expand 1
pack .t -side top -fill x -ipadx 20 -ipady 20 -expand 1
@@ -59,5 +59,17 @@ if {$::tcl_platform(platform) eq {windows}} {
}
}
+if {$::tcl_platform(platform) eq {windows}} {
+ set icopath [file dirname [file normalize $argv0]]
+ if {[file tail $icopath] eq {git-core}} {
+ set icopath [file dirname $icopath]
+ }
+ set icopath [file dirname $icopath]
+ set icopath [file join $icopath share git git-for-windows.ico]
+ if {[file exists $icopath]} {
+ wm iconbitmap . -default $icopath
+ }
+}
+
wm title . $title
tk::PlaceWindow .
diff --git a/git-gui/git-gui.sh b/git-gui/git-gui.sh
index 23fe76e498bd17..799b564b926d0f 100755
--- a/git-gui/git-gui.sh
+++ b/git-gui/git-gui.sh
@@ -1934,6 +1934,7 @@ set all_icons(U$ui_index) file_merge
set all_icons(T$ui_index) file_statechange
set all_icons(_$ui_workdir) file_plain
+set all_icons(A$ui_workdir) file_plain
set all_icons(M$ui_workdir) file_mod
set all_icons(D$ui_workdir) file_question
set all_icons(U$ui_workdir) file_merge
@@ -1960,6 +1961,7 @@ foreach i {
{A_ {mc "Staged for commit"}}
{AM {mc "Portions staged for commit"}}
{AD {mc "Staged for commit, missing"}}
+ {AA {mc "Intended to be added"}}
{_D {mc "Missing"}}
{D_ {mc "Staged for removal"}}
diff --git a/git-gui/lib/diff.tcl b/git-gui/lib/diff.tcl
index 8be1a613fbe01f..d25a9bbdc4abde 100644
--- a/git-gui/lib/diff.tcl
+++ b/git-gui/lib/diff.tcl
@@ -556,7 +556,8 @@ proc apply_or_revert_hunk {x y revert} {
if {$current_diff_side eq $ui_index} {
set failed_msg [mc "Failed to unstage selected hunk."]
lappend apply_cmd --reverse --cached
- if {[string index $mi 0] ne {M}} {
+ set file_state [string index $mi 0]
+ if {$file_state ne {M} && $file_state ne {A}} {
unlock_index
return
}
@@ -569,7 +570,8 @@ proc apply_or_revert_hunk {x y revert} {
lappend apply_cmd --cached
}
- if {[string index $mi 1] ne {M}} {
+ set file_state [string index $mi 1]
+ if {$file_state ne {M} && $file_state ne {A}} {
unlock_index
return
}
@@ -661,7 +663,8 @@ proc apply_or_revert_range_or_line {x y revert} {
set failed_msg [mc "Failed to unstage selected line."]
set to_context {+}
lappend apply_cmd --reverse --cached
- if {[string index $mi 0] ne {M}} {
+ set file_state [string index $mi 0]
+ if {$file_state ne {M} && $file_state ne {A}} {
unlock_index
return
}
@@ -676,7 +679,8 @@ proc apply_or_revert_range_or_line {x y revert} {
lappend apply_cmd --cached
}
- if {[string index $mi 1] ne {M}} {
+ set file_state [string index $mi 1]
+ if {$file_state ne {M} && $file_state ne {A}} {
unlock_index
return
}
diff --git a/git-sh-setup.sh b/git-sh-setup.sh
index 19aef72ec25530..c51ad34148ccf3 100644
--- a/git-sh-setup.sh
+++ b/git-sh-setup.sh
@@ -292,17 +292,30 @@ create_virtual_base() {
# Platform specific tweaks to work around some commands
case $(uname -s) in
*MINGW*)
- # Windows has its own (incompatible) sort and find
- sort () {
- /usr/bin/sort "$@"
- }
- find () {
- /usr/bin/find "$@"
- }
- # git sees Windows-style pwd
- pwd () {
- builtin pwd -W
- }
+ if test -x /usr/bin/sort
+ then
+ # Windows has its own (incompatible) sort; override
+ sort () {
+ /usr/bin/sort "$@"
+ }
+ fi
+ if test -x /usr/bin/find
+ then
+ # Windows has its own (incompatible) find; override
+ find () {
+ /usr/bin/find "$@"
+ }
+ fi
+ # On Windows, Git wants Windows paths. But /usr/bin/pwd spits out
+ # Unix-style paths. At least in Bash, we have a builtin pwd that
+ # understands the -W option to force "mixed" paths, i.e. with drive
+ # prefix but still with forward slashes. Let's use that, if available.
+ if type builtin >/dev/null 2>&1
+ then
+ pwd () {
+ builtin pwd -W
+ }
+ fi
is_absolute_path () {
case "$1" in
[/\\]* | [A-Za-z]:*)
diff --git a/git-svn.perl b/git-svn.perl
index 32c648c3956fa4..37af8e873a9738 100755
--- a/git-svn.perl
+++ b/git-svn.perl
@@ -305,6 +305,19 @@ sub term_init {
: new Term::ReadLine 'git-svn';
}
+sub deprecated_warning {
+ my @lines = @_;
+ if (-t STDERR) {
+ @lines = map { "\e[33m$_\e[0m" } @lines;
+ }
+ warn join("\n", @lines), "\n";
+}
+
+deprecated_warning(
+ "WARNING: \`git svn\` is no longer supported by the Git for Windows project.",
+ "See https://github.com/git-for-windows/git/issues/5405 for details."
+);
+
my $cmd;
for (my $i = 0; $i < @ARGV; $i++) {
if (defined $cmd{$ARGV[$i]}) {
diff --git a/git.c b/git.c
index 5a40eab8a26a66..8f5f92f137b38a 100644
--- a/git.c
+++ b/git.c
@@ -659,6 +659,7 @@ static struct cmd_struct commands[] = {
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "stripspace", cmd_stripspace },
{ "submodule--helper", cmd_submodule__helper, RUN_SETUP },
+ { "survey", cmd_survey, RUN_SETUP },
{ "switch", cmd_switch, RUN_SETUP | NEED_WORK_TREE },
{ "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
{ "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG },
diff --git a/git.rc.in b/git.rc.in
index e69444eef3f0c5..cd671dc2f0d134 100644
--- a/git.rc.in
+++ b/git.rc.in
@@ -1,3 +1,4 @@
+#include <winuser.h>
1 VERSIONINFO
FILEVERSION @GIT_MAJOR_VERSION@,@GIT_MINOR_VERSION@,@GIT_MICRO_VERSION@,@GIT_PATCH_LEVEL@
PRODUCTVERSION @GIT_MAJOR_VERSION@,@GIT_MINOR_VERSION@,@GIT_MICRO_VERSION@,@GIT_PATCH_LEVEL@
@@ -12,6 +13,7 @@ BEGIN
VALUE "OriginalFilename", "git.exe\0"
VALUE "ProductName", "Git\0"
VALUE "ProductVersion", "@GIT_VERSION@\0"
+ VALUE "FileVersion", "@GIT_VERSION@\0"
END
END
diff --git a/grep.c b/grep.c
index c7e1dc1e0ee4fe..4fc12251880544 100644
--- a/grep.c
+++ b/grep.c
@@ -1646,6 +1646,8 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle
bol = gs->buf;
left = gs->size;
+ if (left && gs->buf[left-1] == '\n')
+ left--;
while (left) {
const char *eol;
int hit;
diff --git a/http.c b/http.c
index d8d016891b7974..5bc2ad8b873a77 100644
--- a/http.c
+++ b/http.c
@@ -131,7 +131,8 @@ enum http_follow_config http_follow_config = HTTP_FOLLOW_INITIAL;
static struct credential cert_auth = CREDENTIAL_INIT;
static int ssl_cert_password_required;
-static unsigned long http_auth_methods = CURLAUTH_ANY;
+static unsigned long http_auth_any = CURLAUTH_ANY & ~CURLAUTH_NTLM;
+static unsigned long http_auth_methods;
static int http_auth_methods_restricted;
/* Modes for which empty_auth cannot actually help us. */
static unsigned long empty_auth_useless =
@@ -150,7 +151,12 @@ static char *cached_accept_language;
static char *http_ssl_backend;
-static int http_schannel_check_revoke = 1;
+static long http_schannel_check_revoke_mode =
+#ifdef CURLSSLOPT_REVOKE_BEST_EFFORT
+ CURLSSLOPT_REVOKE_BEST_EFFORT;
+#else
+ CURLSSLOPT_NO_REVOKE;
+#endif
static long http_retry_after = 0;
static long http_max_retries = 0;
@@ -163,6 +169,8 @@ static long http_max_retry_time = 300;
*/
static int http_schannel_use_ssl_cainfo;
+static int http_auto_client_cert;
+
static int always_auth_proactively(void)
{
return http_proactive_auth != PROACTIVE_AUTH_NONE &&
@@ -429,8 +437,29 @@ static int http_options(const char *var, const char *value,
return 0;
}
+ if (!strcmp("http.allowntlmauth", var)) {
+ if (git_config_bool(var, value)) {
+ http_auth_any |= CURLAUTH_NTLM;
+ } else {
+ http_auth_any &= ~CURLAUTH_NTLM;
+ }
+ return 0;
+ }
+
if (!strcmp("http.schannelcheckrevoke", var)) {
- http_schannel_check_revoke = git_config_bool(var, value);
+ if (value && !strcmp(value, "best-effort")) {
+ http_schannel_check_revoke_mode =
+#ifdef CURLSSLOPT_REVOKE_BEST_EFFORT
+ CURLSSLOPT_REVOKE_BEST_EFFORT;
+#else
+ CURLSSLOPT_NO_REVOKE;
+ warning(_("%s=%s unsupported by current cURL"),
+ var, value);
+#endif
+ } else
+ http_schannel_check_revoke_mode =
+ (git_config_bool(var, value) ?
+ 0 : CURLSSLOPT_NO_REVOKE);
return 0;
}
@@ -439,6 +468,11 @@ static int http_options(const char *var, const char *value,
return 0;
}
+ if (!strcmp("http.sslautoclientcert", var)) {
+ http_auto_client_cert = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp("http.minsessions", var)) {
min_curl_sessions = git_config_int(var, value, ctx->kvi);
if (min_curl_sessions > 1)
@@ -650,6 +684,11 @@ static void init_curl_http_auth(CURL *result)
credential_fill(the_repository, &http_auth, 1);
+ if (http_auth.ntlm_allow && !(http_auth_methods & CURLAUTH_NTLM)) {
+ http_auth_methods |= CURLAUTH_NTLM;
+ curl_easy_setopt(result, CURLOPT_HTTPAUTH, http_auth_methods);
+ }
+
if (http_auth.password) {
if (always_auth_proactively()) {
/*
@@ -709,11 +748,11 @@ static void init_curl_proxy_auth(CURL *result)
if (i == ARRAY_SIZE(proxy_authmethods)) {
warning("unsupported proxy authentication method %s: using anyauth",
http_proxy_authmethod);
- curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY);
+ curl_easy_setopt(result, CURLOPT_PROXYAUTH, http_auth_any);
}
}
else
- curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY);
+ curl_easy_setopt(result, CURLOPT_PROXYAUTH, http_auth_any);
}
static int has_cert_password(void)
@@ -1060,7 +1099,7 @@ static CURL *get_curl_handle(void)
}
curl_easy_setopt(result, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);
- curl_easy_setopt(result, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
+ curl_easy_setopt(result, CURLOPT_HTTPAUTH, http_auth_any);
#ifdef CURLGSSAPI_DELEGATION_FLAG
if (curl_deleg) {
@@ -1078,9 +1117,20 @@ static CURL *get_curl_handle(void)
}
#endif
- if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) &&
- !http_schannel_check_revoke) {
- curl_easy_setopt(result, CURLOPT_SSL_OPTIONS, (long)CURLSSLOPT_NO_REVOKE);
+ if (http_ssl_backend && !strcmp("schannel", http_ssl_backend)) {
+ long ssl_options = 0;
+ if (http_schannel_check_revoke_mode) {
+ ssl_options |= http_schannel_check_revoke_mode;
+ }
+
+ if (http_auto_client_cert) {
+#ifdef GIT_CURL_HAVE_CURLSSLOPT_AUTO_CLIENT_CERT
+ ssl_options |= CURLSSLOPT_AUTO_CLIENT_CERT;
+#endif
+ }
+
+ if (ssl_options)
+ curl_easy_setopt(result, CURLOPT_SSL_OPTIONS, ssl_options);
}
if (http_proactive_auth != PROACTIVE_AUTH_NONE)
@@ -1448,6 +1498,8 @@ void http_init(struct remote *remote, const char *url, int proactive_auth)
set_long_from_env(&http_max_retries, "GIT_HTTP_MAX_RETRIES");
set_long_from_env(&http_max_retry_time, "GIT_HTTP_MAX_RETRY_TIME");
+ http_auth_methods = http_auth_any;
+
curl_default = get_curl_handle();
}
@@ -1879,6 +1931,8 @@ static int handle_curl_result(struct slot_results *results)
} else if (missing_target(results))
return HTTP_MISSING_TARGET;
else if (results->http_code == 401) {
+ http_auth.ntlm_suppressed = (results->auth_avail & CURLAUTH_NTLM) &&
+ !(http_auth_any & CURLAUTH_NTLM);
if ((http_auth.username && http_auth.password) ||
(http_auth.authtype && http_auth.credential)) {
if (http_auth.multistage) {
@@ -1888,6 +1942,16 @@ static int handle_curl_result(struct slot_results *results)
credential_reject(the_repository, &http_auth);
if (always_auth_proactively())
http_proactive_auth = PROACTIVE_AUTH_NONE;
+ if (http_auth.ntlm_suppressed) {
+ warning(_("Due to its cryptographic weaknesses, "
+ "NTLM authentication has been\n"
+ "disabled in Git by default. You can "
+ "re-enable it for trusted servers\n"
+ "by running:\n\n"
+ "git config set "
+ "http.%s://%s.allowNTLMAuth true"),
+ http_auth.protocol, http_auth.host);
+ }
return HTTP_NOAUTH;
} else {
http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE;
@@ -2401,6 +2465,13 @@ static int http_request_recoverable(const char *url,
credential_fill(the_repository, &http_auth, 1);
}
+ /*
+ * Re-enable NTLM auth if the helper allows it and we would
+ * otherwise suppress authentication via NTLM.
+ */
+ if (http_auth.ntlm_suppressed && http_auth.ntlm_allow)
+ http_auth_methods |= CURLAUTH_NTLM;
+
ret = http_request(url, result, target, options);
}
if (ret == HTTP_RATE_LIMITED) {
diff --git a/mem-pool.c b/mem-pool.c
index 8bc77cb0e80a35..89bca70f713692 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -7,7 +7,9 @@
#include "git-compat-util.h"
#include "mem-pool.h"
#include "gettext.h"
+#include "trace.h"
+static struct trace_key trace_mem_pool = TRACE_KEY_INIT(MEMPOOL);
#define BLOCK_GROWTH_SIZE (1024 * 1024 - sizeof(struct mp_block))
/*
@@ -65,12 +67,20 @@ void mem_pool_init(struct mem_pool *pool, size_t initial_size)
if (initial_size > 0)
mem_pool_alloc_block(pool, initial_size, NULL);
+
+ trace_printf_key(&trace_mem_pool,
+ "mem_pool (%p): init (%"PRIuMAX") initial size\n",
+ (void *)pool, (uintmax_t)initial_size);
}
void mem_pool_discard(struct mem_pool *pool, int invalidate_memory)
{
struct mp_block *block, *block_to_free;
+ trace_printf_key(&trace_mem_pool,
+ "mem_pool (%p): discard (%"PRIuMAX") unused\n",
+ (void *)pool,
+ (uintmax_t)(pool->mp_block->end - pool->mp_block->next_free));
block = pool->mp_block;
while (block)
{
diff --git a/meson.build b/meson.build
index 8309942d184847..d655d043ad1d96 100644
--- a/meson.build
+++ b/meson.build
@@ -677,6 +677,7 @@ builtin_sources = [
'builtin/stash.c',
'builtin/stripspace.c',
'builtin/submodule--helper.c',
+ 'builtin/survey.c',
'builtin/symbolic-ref.c',
'builtin/tag.c',
'builtin/unpack-file.c',
@@ -1281,16 +1282,17 @@ elif host_machine.system() == 'windows'
'compat/winansi.c',
'compat/win32/dirent.c',
'compat/win32/flush.c',
+ 'compat/win32/fscache.c',
'compat/win32/path-utils.c',
'compat/win32/pthread.c',
'compat/win32/syslog.c',
+ 'compat/win32/wsl.c',
'compat/win32mmap.c',
'compat/nedmalloc/nedmalloc.c',
]
libgit_c_args += [
'-DDETECT_MSYS_TTY',
- '-DENSURE_MSYSTEM_IS_SET',
'-DNATIVE_CRLF',
'-DNOGDI',
'-DNO_POSIX_GOODIES',
@@ -1300,6 +1302,18 @@ elif host_machine.system() == 'windows'
'-D__USE_MINGW_ANSI_STDIO=0',
]
+ msystem = get_option('msystem')
+ if msystem != ''
+ mingw_prefix = get_option('mingw_prefix')
+ if mingw_prefix == ''
+ mingw_prefix = '/' + msystem.to_lower()
+ endif
+ libgit_c_args += [
+ '-DENSURE_MSYSTEM_IS_SET="' + msystem + '"',
+ '-DMINGW_PREFIX="' + mingw_prefix + '"'
+ ]
+ endif
+
libgit_dependencies += compiler.find_library('ntdll')
libgit_include_directories += 'compat/win32'
if compiler.get_id() == 'msvc'
diff --git a/meson_options.txt b/meson_options.txt
index 659cbb218f46e0..4c77708a28aa01 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -21,6 +21,10 @@ option('runtime_prefix', type: 'boolean', value: false,
description: 'Resolve ancillary tooling and support files relative to the location of the runtime binary instead of hard-coding them into the binary.')
option('sane_tool_path', type: 'array', value: [],
description: 'An array of paths to pick up tools from in case the normal tools are broken or lacking.')
+option('msystem', type: 'string', value: '',
+ description: 'Fall-back on Windows when MSYSTEM is not set.')
+option('mingw_prefix', type: 'string', value: '',
+ description: 'Fall-back on Windows when MINGW_PREFIX is not set.')
# Build information compiled into Git and other parts like documentation.
option('build_date', type: 'string', value: '',
diff --git a/object-file.c b/object-file.c
index f0b029ff0b2cb0..5ac6fbcecfce47 100644
--- a/object-file.c
+++ b/object-file.c
@@ -557,9 +557,9 @@ int odb_source_loose_read_object_info(struct odb_source *source,
}
static void hash_object_body(const struct git_hash_algo *algo, struct git_hash_ctx *c,
- const void *buf, unsigned long len,
+ const void *buf, size_t len,
struct object_id *oid,
- char *hdr, int *hdrlen)
+ char *hdr, size_t *hdrlen)
{
algo->init_fn(c);
git_hash_update(c, hdr, *hdrlen);
@@ -568,16 +568,16 @@ static void hash_object_body(const struct git_hash_algo *algo, struct git_hash_c
}
static void write_object_file_prepare(const struct git_hash_algo *algo,
- const void *buf, unsigned long len,
+ const void *buf, size_t len,
enum object_type type, struct object_id *oid,
- char *hdr, int *hdrlen)
+ char *hdr, size_t *hdrlen)
{
struct git_hash_ctx c;
/* Generate the header */
*hdrlen = format_object_header(hdr, *hdrlen, type, len);
- /* Sha1.. */
+ /* Hash (function pointers) computation */
hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
}
@@ -713,11 +713,11 @@ int finalize_object_file_flags(struct repository *repo,
}
void hash_object_file(const struct git_hash_algo *algo, const void *buf,
- unsigned long len, enum object_type type,
+ size_t len, enum object_type type,
struct object_id *oid)
{
char hdr[MAX_HEADER_LEN];
- int hdrlen = sizeof(hdr);
+ size_t hdrlen = sizeof(hdr);
write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
}
@@ -1164,7 +1164,7 @@ int odb_source_loose_write_stream(struct odb_source *source,
}
int odb_source_loose_write_object(struct odb_source *source,
- const void *buf, unsigned long len,
+ const void *buf, size_t len,
enum object_type type, struct object_id *oid,
struct object_id *compat_oid_in, unsigned flags)
{
@@ -1172,7 +1172,7 @@ int odb_source_loose_write_object(struct odb_source *source,
const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
struct object_id compat_oid;
char hdr[MAX_HEADER_LEN];
- int hdrlen = sizeof(hdr);
+ size_t hdrlen = sizeof(hdr);
/* Generate compat_oid */
if (compat) {
diff --git a/object-file.h b/object-file.h
index f8d8805a18cc8c..7cea775fc47d6a 100644
--- a/object-file.h
+++ b/object-file.h
@@ -66,7 +66,7 @@ int odb_source_loose_freshen_object(struct odb_source *source,
const struct object_id *oid);
int odb_source_loose_write_object(struct odb_source *source,
- const void *buf, unsigned long len,
+ const void *buf, size_t len,
enum object_type type, struct object_id *oid,
struct object_id *compat_oid_in, unsigned flags);
@@ -195,7 +195,7 @@ int finalize_object_file_flags(struct repository *repo,
enum finalize_object_file_flags flags);
void hash_object_file(const struct git_hash_algo *algo, const void *buf,
- unsigned long len, enum object_type type,
+ size_t len, enum object_type type,
struct object_id *oid);
/* Helper to check and "touch" a file */
diff --git a/parallel-checkout.c b/parallel-checkout.c
index 0bf4bd6d4abd8c..8fadb7c804bc02 100644
--- a/parallel-checkout.c
+++ b/parallel-checkout.c
@@ -640,6 +640,7 @@ static void write_items_sequentially(struct checkout *state)
{
size_t i;
+ flush_fscache();
for (i = 0; i < parallel_checkout.nr; i++) {
struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
write_pc_item(pc_item, state);
diff --git a/path.c b/path.c
index c28535785946c2..263be798dc8d56 100644
--- a/path.c
+++ b/path.c
@@ -1328,6 +1328,45 @@ char *strip_path_suffix(const char *path, const char *suffix)
return offset == -1 ? NULL : xstrndup(path, offset);
}
+int is_mount_point_via_stat(struct strbuf *path)
+{
+ size_t len = path->len;
+ dev_t current_dev;
+ struct stat st;
+
+ if (!strcmp("/", path->buf))
+ return 1;
+
+ strbuf_addstr(path, "/.");
+ if (lstat(path->buf, &st)) {
+ /*
+ * If we cannot access the current directory, we cannot say
+ * that it is a bind mount.
+ */
+ strbuf_setlen(path, len);
+ return 0;
+ }
+ current_dev = st.st_dev;
+
+ /* Now look at the parent directory */
+ strbuf_addch(path, '.');
+ if (lstat(path->buf, &st)) {
+ /*
+ * If we cannot access the parent directory, we cannot say
+ * that it is a bind mount.
+ */
+ strbuf_setlen(path, len);
+ return 0;
+ }
+ strbuf_setlen(path, len);
+
+ /*
+ * If the device ID differs between current and parent directory,
+ * then it is a bind mount.
+ */
+ return current_dev != st.st_dev;
+}
+
int daemon_avoid_alias(const char *p)
{
int sl, ndot;
@@ -1545,6 +1584,7 @@ int looks_like_command_line_option(const char *str)
char *xdg_config_home_for(const char *subdir, const char *filename)
{
const char *home, *config_home;
+ char *home_config = NULL;
assert(subdir);
assert(filename);
@@ -1553,10 +1593,26 @@ char *xdg_config_home_for(const char *subdir, const char *filename)
return mkpathdup("%s/%s/%s", config_home, subdir, filename);
home = getenv("HOME");
- if (home)
- return mkpathdup("%s/.config/%s/%s", home, subdir, filename);
+ if (home && *home)
+ home_config = mkpathdup("%s/.config/%s/%s", home, subdir, filename);
+
+ #ifdef WIN32
+ {
+ const char *appdata = getenv("APPDATA");
+ if (appdata && *appdata) {
+ char *appdata_config = mkpathdup("%s/Git/%s", appdata, filename);
+ if (file_exists(appdata_config)) {
+ if (home_config && file_exists(home_config))
+ warning("'%s' was ignored because '%s' exists.", home_config, appdata_config);
+ free(home_config);
+ return appdata_config;
+ }
+ free(appdata_config);
+ }
+ }
+ #endif
- return NULL;
+ return home_config;
}
char *xdg_config_home(const char *filename)
diff --git a/path.h b/path.h
index cbcad254a0a0b5..ecc02ec7d76c04 100644
--- a/path.h
+++ b/path.h
@@ -155,6 +155,7 @@ int normalize_path_copy(char *dst, const char *src);
int strbuf_normalize_path(struct strbuf *src);
int longest_ancestor_length(const char *path, struct string_list *prefixes);
char *strip_path_suffix(const char *path, const char *suffix);
+int is_mount_point_via_stat(struct strbuf *path);
int daemon_avoid_alias(const char *path);
/*
diff --git a/preload-index.c b/preload-index.c
index b222821b448526..ac0310008754a3 100644
--- a/preload-index.c
+++ b/preload-index.c
@@ -20,6 +20,8 @@
#include "trace2.h"
#include "config.h"
+static struct fscache *fscache;
+
/*
* Mostly randomly chosen maximum thread counts: we
* cap the parallelism to 20 threads, and we want
@@ -57,6 +59,7 @@ static void *preload_thread(void *_data)
nr = index->cache_nr - p->offset;
last_nr = nr;
+ enable_fscache(nr);
do {
struct cache_entry *ce = *cep++;
struct stat st;
@@ -100,6 +103,7 @@ static void *preload_thread(void *_data)
pthread_mutex_unlock(&pd->mutex);
}
cache_def_clear(&cache);
+ merge_fscache(fscache);
return NULL;
}
@@ -118,6 +122,7 @@ void preload_index(struct index_state *index,
if (!HAVE_THREADS || !core_preload_index)
return;
+ fscache = getcache_fscache();
threads = index->cache_nr / THREAD_COST;
if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0))
threads = 2;
diff --git a/read-cache.c b/read-cache.c
index 5049f9baca9c5e..f8730352d0d3c6 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1515,6 +1515,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
+ enable_fscache(0);
/*
* Use the multi-threaded preload_index() to refresh most of the
* cache entries quickly then in the single threaded loop below,
@@ -1609,6 +1610,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
display_progress(progress, istate->cache_nr);
stop_progress(&progress);
trace_performance_leave("refresh index");
+ disable_fscache();
return has_errors;
}
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 0537a72b2af9e0..3cff96abe17ade 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -2112,7 +2112,7 @@ static int create_ref_symlink(struct ref_lock *lock, const char *target)
ref_path = get_locked_file_path(&lock->lk);
unlink(ref_path);
- ret = symlink(target, ref_path);
+ ret = create_symlink(NULL, target, ref_path);
free(ref_path);
if (ret)
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index b124404663edf6..23b18837c8efee 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -389,6 +389,7 @@ static struct ref_store *reftable_be_init(struct repository *repo,
refs_compute_filesystem_location(gitdir, payload, &is_worktree, &refdir,
&ref_common_dir);
+ reftable_set_alloc(malloc, realloc, free);
base_ref_store_init(&refs->base, repo, refdir.buf, &refs_be_reftable);
strmap_init(&refs->worktree_backends);
refs->store_flags = store_flags;
diff --git a/repository.c b/repository.c
index 9e5537f53961ed..2a520c46573c3e 100644
--- a/repository.c
+++ b/repository.c
@@ -153,7 +153,7 @@ static void repo_set_commondir(struct repository *repo,
{
struct strbuf sb = STRBUF_INIT;
- free(repo->commondir);
+ FREE_AND_NULL(repo->commondir);
if (commondir) {
repo->different_commondir = 1;
diff --git a/run-command.c b/run-command.c
index 32c290ee6a221f..7f587f3dd634ff 100644
--- a/run-command.c
+++ b/run-command.c
@@ -581,6 +581,7 @@ static int wait_or_whine(pid_t pid, const char *argv0, int in_signal)
*/
code += 128;
} else if (WIFEXITED(status)) {
+ warn_about_git_lfs_on_windows7(status, argv0);
code = WEXITSTATUS(status);
} else {
if (!in_signal)
diff --git a/send-pack.c b/send-pack.c
index 07ecfae4de92a2..e950144d9e5a93 100644
--- a/send-pack.c
+++ b/send-pack.c
@@ -501,7 +501,7 @@ int send_pack(struct repository *r,
int need_pack_data = 0;
int allow_deleting_refs = 0;
int status_report = 0;
- int use_sideband = 0;
+ int use_sideband = 1;
int quiet_supported = 0;
int agent_supported = 0;
int advertise_sid = 0;
@@ -525,6 +525,7 @@ int send_pack(struct repository *r,
goto out;
}
+ repo_config_get_bool(r, "sendpack.sideband", &use_sideband);
repo_config_get_bool(r, "push.negotiate", &push_negotiate);
if (push_negotiate) {
trace2_region_enter("send_pack", "push_negotiate", r);
@@ -546,8 +547,7 @@ int send_pack(struct repository *r,
allow_deleting_refs = 1;
if (server_supports("ofs-delta"))
args->use_ofs_delta = 1;
- if (server_supports("side-band-64k"))
- use_sideband = 1;
+ use_sideband = use_sideband && server_supports("side-band-64k");
if (server_supports("quiet"))
quiet_supported = 1;
if (server_supports("agent"))
diff --git a/setup.c b/setup.c
index 7ec4427368a2a7..55f45345b4a2b5 100644
--- a/setup.c
+++ b/setup.c
@@ -1919,10 +1919,19 @@ const char *setup_git_directory_gently(int *nongit_ok)
break;
case GIT_DIR_INVALID_OWNERSHIP:
if (!nongit_ok) {
+ struct strbuf prequoted = STRBUF_INIT;
struct strbuf quoted = STRBUF_INIT;
strbuf_complete(&report, '\n');
- sq_quote_buf_pretty(&quoted, dir.buf);
+
+#ifdef __MINGW32__
+ if (dir.buf[0] == '/')
+ strbuf_addstr(&prequoted, "%(prefix)/");
+#endif
+
+ strbuf_add(&prequoted, dir.buf, dir.len);
+ sq_quote_buf_pretty(&quoted, prequoted.buf);
+
die(_("detected dubious ownership in repository at '%s'\n"
"%s"
"To add an exception for this directory, call:\n"
@@ -2291,7 +2300,7 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
if (strbuf_readlink(&lnk, template_path->buf,
st_template.st_size) < 0)
die_errno(_("cannot readlink '%s'"), template_path->buf);
- if (symlink(lnk.buf, path->buf))
+ if (create_symlink(NULL, lnk.buf, path->buf))
die_errno(_("cannot symlink '%s' '%s'"),
lnk.buf, path->buf);
strbuf_release(&lnk);
@@ -2570,7 +2579,7 @@ static int create_default_files(const char *template_path,
repo_git_path_replace(the_repository, &path, "tXXXXXX");
if (!close(xmkstemp(path.buf)) &&
!unlink(path.buf) &&
- !symlink("testing", path.buf) &&
+ !create_symlink(NULL, "testing", path.buf) &&
!lstat(path.buf, &st1) &&
S_ISLNK(st1.st_mode))
unlink(path.buf); /* good */
diff --git a/sha1dc_git.c b/sha1dc_git.c
index 9b675a046ee699..fe58d7962a30c9 100644
--- a/sha1dc_git.c
+++ b/sha1dc_git.c
@@ -27,10 +27,9 @@ void git_SHA1DCFinal(unsigned char hash[20], SHA1_CTX *ctx)
/*
* Same as SHA1DCUpdate, but adjust types to match git's usual interface.
*/
-void git_SHA1DCUpdate(SHA1_CTX *ctx, const void *vdata, unsigned long len)
+void git_SHA1DCUpdate(SHA1_CTX *ctx, const void *vdata, size_t len)
{
const char *data = vdata;
- /* We expect an unsigned long, but sha1dc only takes an int */
while (len > INT_MAX) {
SHA1DCUpdate(ctx, data, INT_MAX);
data += INT_MAX;
diff --git a/sha1dc_git.h b/sha1dc_git.h
index f6f880cabea382..0bcf1aa84b7241 100644
--- a/sha1dc_git.h
+++ b/sha1dc_git.h
@@ -15,7 +15,7 @@ void git_SHA1DCInit(SHA1_CTX *);
#endif
void git_SHA1DCFinal(unsigned char [20], SHA1_CTX *);
-void git_SHA1DCUpdate(SHA1_CTX *ctx, const void *data, unsigned long len);
+void git_SHA1DCUpdate(SHA1_CTX *ctx, const void *data, size_t len);
#define platform_SHA_IS_SHA1DC /* used by "test-tool sha1-is-sha1dc" */
diff --git a/sideband.c b/sideband.c
index 1ed6614eaf1baf..526ddee544d81d 100644
--- a/sideband.c
+++ b/sideband.c
@@ -26,6 +26,12 @@ static struct keyword_entry keywords[] = {
{ "error", GIT_COLOR_BOLD_RED },
};
+static enum {
+ ALLOW_NO_CONTROL_CHARACTERS = 0,
+ ALLOW_ALL_CONTROL_CHARACTERS = 1,
+ ALLOW_ANSI_COLOR_SEQUENCES = 2
+} allow_control_characters = ALLOW_ANSI_COLOR_SEQUENCES;
+
/* Returns a color setting (GIT_COLOR_NEVER, etc). */
static enum git_colorbool use_sideband_colors(void)
{
@@ -39,6 +45,25 @@ static enum git_colorbool use_sideband_colors(void)
if (use_sideband_colors_cached != GIT_COLOR_UNKNOWN)
return use_sideband_colors_cached;
+ switch (repo_config_get_maybe_bool(the_repository, "sideband.allowcontrolcharacters", &i)) {
+ case 0: /* Boolean value */
+ allow_control_characters = i ? ALLOW_ALL_CONTROL_CHARACTERS :
+ ALLOW_NO_CONTROL_CHARACTERS;
+ break;
+ case -1: /* non-Boolean value */
+ if (repo_config_get_string_tmp(the_repository, "sideband.allowcontrolcharacters",
+ &value))
+ ; /* huh? `get_maybe_bool()` returned -1 */
+ else if (!strcmp(value, "color"))
+ allow_control_characters = ALLOW_ANSI_COLOR_SEQUENCES;
+ else
+ warning(_("unrecognized value for `sideband."
+ "allowControlCharacters`: '%s'"), value);
+ break;
+ default:
+ break; /* not configured */
+ }
+
if (!repo_config_get_string_tmp(the_repository, key, &value))
use_sideband_colors_cached = git_config_colorbool(key, value);
else if (!repo_config_get_string_tmp(the_repository, "color.ui", &value))
@@ -66,6 +91,55 @@ void list_config_color_sideband_slots(struct string_list *list, const char *pref
list_config_item(list, prefix, keywords[i].keyword);
}
+static int handle_ansi_color_sequence(struct strbuf *dest, const char *src, int n)
+{
+ int i;
+
+ /*
+ * Valid ANSI color sequences are of the form
+ *
+ * ESC [ [<n> [; <n>]*] m
+ */
+
+ if (allow_control_characters != ALLOW_ANSI_COLOR_SEQUENCES ||
+ n < 3 || src[0] != '\x1b' || src[1] != '[')
+ return 0;
+
+ for (i = 2; i < n; i++) {
+ if (src[i] == 'm') {
+ strbuf_add(dest, src, i + 1);
+ return i;
+ }
+ if (!isdigit(src[i]) && src[i] != ';')
+ break;
+ }
+
+ return 0;
+}
+
+static void strbuf_add_sanitized(struct strbuf *dest, const char *src, int n)
+{
+ int i;
+
+ if (allow_control_characters == ALLOW_ALL_CONTROL_CHARACTERS) {
+ strbuf_add(dest, src, n);
+ return;
+ }
+
+ strbuf_grow(dest, n);
+ for (; n && *src; src++, n--) {
+ if (!iscntrl(*src) || *src == '\t' || *src == '\n')
+ strbuf_addch(dest, *src);
+ else if ((i = handle_ansi_color_sequence(dest, src, n))) {
+ src += i;
+ n -= i;
+ } else {
+ strbuf_addch(dest, '^');
+ strbuf_addch(dest, 0x40 + *src);
+ }
+ }
+}
+
/*
* Optionally highlight one keyword in remote output if it appears at the start
* of the line. This should be called for a single line only, which is
@@ -81,7 +155,7 @@ static void maybe_colorize_sideband(struct strbuf *dest, const char *src, int n)
int i;
if (!want_color_stderr(use_sideband_colors())) {
- strbuf_add(dest, src, n);
+ strbuf_add_sanitized(dest, src, n);
return;
}
@@ -114,7 +188,7 @@ static void maybe_colorize_sideband(struct strbuf *dest, const char *src, int n)
}
}
- strbuf_add(dest, src, n);
+ strbuf_add_sanitized(dest, src, n);
}
diff --git a/t/README b/t/README
index adbbd9acf4ab27..f19468151410eb 100644
--- a/t/README
+++ b/t/README
@@ -479,6 +479,9 @@ GIT_TEST_NAME_HASH_VERSION=, when set, causes 'git pack-objects' to
assume '--name-hash-version='.
+GIT_TEST_FSCACHE= exercises the uncommon fscache code path
+which adds a cache below mingw's lstat and dirent implementations.
+
Naming Tests
------------
diff --git a/t/helper/meson.build b/t/helper/meson.build
index 675e64c0101b61..cba4a9bf4f1434 100644
--- a/t/helper/meson.build
+++ b/t/helper/meson.build
@@ -29,6 +29,7 @@ test_tool_sources = [
'test-hash.c',
'test-hashmap.c',
'test-hexdump.c',
+ 'test-iconv.c',
'test-json-writer.c',
'test-lazy-init-name-hash.c',
'test-match-trees.c',
diff --git a/t/helper/test-iconv.c b/t/helper/test-iconv.c
new file mode 100644
index 00000000000000..d3c772fddf990b
--- /dev/null
+++ b/t/helper/test-iconv.c
@@ -0,0 +1,47 @@
+#include "test-tool.h"
+#include "git-compat-util.h"
+#include "strbuf.h"
+#include "gettext.h"
+#include "parse-options.h"
+#include "utf8.h"
+
+int cmd__iconv(int argc, const char **argv)
+{
+ struct strbuf buf = STRBUF_INIT;
+ char *from = NULL, *to = NULL, *p;
+ size_t len;
+ int ret = 0;
+ const char * const iconv_usage[] = {
+ N_("test-helper --iconv [<options>]"),
+ NULL
+ };
+ struct option options[] = {
+ OPT_STRING('f', "from-code", &from, "encoding", "from"),
+ OPT_STRING('t', "to-code", &to, "encoding", "to"),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, NULL, options,
+ iconv_usage, 0);
+
+ if (argc > 1 || !from || !to)
+ usage_with_options(iconv_usage, options);
+
+ if (!argc) {
+ if (strbuf_read(&buf, 0, 2048) < 0)
+ die_errno("Could not read from stdin");
+ } else if (strbuf_read_file(&buf, argv[0], 2048) < 0)
+ die_errno("Could not read from '%s'", argv[0]);
+
+ p = reencode_string_len(buf.buf, buf.len, to, from, &len);
+ if (!p)
+ die_errno("Could not reencode");
+ if (write(1, p, len) < 0)
+ ret = !!error_errno("Could not write %"PRIuMAX" bytes",
+ (uintmax_t)len);
+
+ strbuf_release(&buf);
+ free(p);
+
+ return ret;
+}
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index a7abc618b3887e..9d1b41c8e39b89 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -39,6 +39,7 @@ static struct test_cmd cmds[] = {
{ "hashmap", cmd__hashmap },
{ "hash-speed", cmd__hash_speed },
{ "hexdump", cmd__hexdump },
+ { "iconv", cmd__iconv },
{ "json-writer", cmd__json_writer },
{ "lazy-init-name-hash", cmd__lazy_init_name_hash },
{ "match-trees", cmd__match_trees },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index 7f150fa1eb9ad2..e18e5a9ed9de81 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -32,6 +32,7 @@ int cmd__getcwd(int argc, const char **argv);
int cmd__hashmap(int argc, const char **argv);
int cmd__hash_speed(int argc, const char **argv);
int cmd__hexdump(int argc, const char **argv);
+int cmd__iconv(int argc, const char **argv);
int cmd__json_writer(int argc, const char **argv);
int cmd__lazy_init_name_hash(int argc, const char **argv);
int cmd__match_trees(int argc, const char **argv);
diff --git a/t/interop/interop-lib.sh b/t/interop/interop-lib.sh
index 1b5864d2a7f22c..1facc69d97741a 100644
--- a/t/interop/interop-lib.sh
+++ b/t/interop/interop-lib.sh
@@ -4,6 +4,10 @@
. ../../GIT-BUILD-OPTIONS
INTEROP_ROOT=$(pwd)
BUILD_ROOT=$INTEROP_ROOT/build
+case "$PATH" in
+*\;*) PATH_SEP=\; ;;
+*) PATH_SEP=: ;;
+esac
build_version () {
if test -z "$1"
@@ -57,7 +61,7 @@ wrap_git () {
write_script "$1" <<-EOF
GIT_EXEC_PATH="$2"
export GIT_EXEC_PATH
- PATH="$2:\$PATH"
+ PATH="$2$PATH_SEP\$PATH"
export GIT_EXEC_PATH
exec git "\$@"
EOF
@@ -71,7 +75,7 @@ generate_wrappers () {
echo >&2 fatal: test tried to run generic git: $*
exit 1
EOF
- PATH=$(pwd)/.bin:$PATH
+ PATH=$(pwd)/.bin$PATH_SEP$PATH
}
VERSION_A=${GIT_TEST_VERSION_A:-$VERSION_A}
diff --git a/t/test-binary-1.png b/t/lib-diff/test-binary-1.png
similarity index 100%
rename from t/test-binary-1.png
rename to t/lib-diff/test-binary-1.png
diff --git a/t/test-binary-2.png b/t/lib-diff/test-binary-2.png
similarity index 100%
rename from t/test-binary-2.png
rename to t/lib-diff/test-binary-2.png
diff --git a/t/lib-httpd.sh b/t/lib-httpd.sh
index 4c76e813e396bf..7150a2a2f2c5ce 100644
--- a/t/lib-httpd.sh
+++ b/t/lib-httpd.sh
@@ -168,6 +168,7 @@ prepare_httpd() {
install_script apply-one-time-script.sh
install_script nph-custom-auth.sh
install_script http-429.sh
+ install_script ntlm-handshake.sh
ln -s "$LIB_HTTPD_MODULE_PATH" "$HTTPD_ROOT_PATH/modules"
diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf
index 40a690b0bb7c9b..7a5c3620cfe901 100644
--- a/t/lib-httpd/apache.conf
+++ b/t/lib-httpd/apache.conf
@@ -155,6 +155,13 @@ SetEnv PERL_PATH ${PERL_PATH}
CGIPassAuth on
+<LocationMatch /ntlm_auth/>
+ SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
+ SetEnv GIT_HTTP_EXPORT_ALL
+
+ CGIPassAuth on
+</LocationMatch>
+
ScriptAlias /smart/incomplete_length/git-upload-pack incomplete-length-upload-pack-v2-http.sh/
ScriptAlias /smart/incomplete_body/git-upload-pack incomplete-body-upload-pack-v2-http.sh/
ScriptAlias /smart/no_report/git-receive-pack error-no-report.sh/
@@ -166,6 +173,7 @@ ScriptAlias /error/ error.sh/
ScriptAliasMatch /one_time_script/(.*) apply-one-time-script.sh/$1
ScriptAliasMatch /http_429/(.*) http-429.sh/$1
ScriptAliasMatch /custom_auth/(.*) nph-custom-auth.sh/$1
+ScriptAliasMatch /ntlm_auth/(.*) ntlm-handshake.sh/$1
Options FollowSymlinks
diff --git a/t/lib-httpd/ntlm-handshake.sh b/t/lib-httpd/ntlm-handshake.sh
new file mode 100755
index 00000000000000..3cf1266e40f20a
--- /dev/null
+++ b/t/lib-httpd/ntlm-handshake.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+case "$HTTP_AUTHORIZATION" in
+'')
+ # No Authorization header -> send NTLM challenge
+ echo "Status: 401 Unauthorized"
+ echo "WWW-Authenticate: NTLM"
+ echo
+ ;;
+"NTLM TlRMTVNTUAAB"*)
+ # Type 1 -> respond with Type 2 challenge (hardcoded)
+ echo "Status: 401 Unauthorized"
+ # Base64-encoded version of the Type 2 challenge:
+ # signature: 'NTLMSSP\0'
+ # message_type: 2
+ # target_name: 'NTLM-GIT-SERVER'
+ # flags: 0xa2898205 =
+ # NEGOTIATE_UNICODE, REQUEST_TARGET, NEGOTIATE_NT_ONLY,
+ # TARGET_TYPE_SERVER, TARGET_TYPE_SHARE, REQUEST_NON_NT_SESSION_KEY,
+ # NEGOTIATE_VERSION, NEGOTIATE_128, NEGOTIATE_56
+ # challenge: 0xfa3dec518896295b
+ # context: '0000000000000000'
+ # target_info_present: true
+ # target_info_len: 128
+ # version: '10.0 (build 19041)'
+ echo "WWW-Authenticate: NTLM TlRMTVNTUAACAAAAHgAeADgAAAAFgomi+j3sUYiWKVsAAAAAAAAAAIAAgABWAAAACgBhSgAAAA9OAFQATABNAC0ARwBJAFQALQBTAEUAUgBWAEUAUgACABIAVwBPAFIASwBHAFIATwBVAFAAAQAeAE4AVABMAE0ALQBHAEkAVAAtAFMARQBSAFYARQBSAAQAEgBXAE8AUgBLAEcAUgBPAFUAUAADAB4ATgBUAEwATQAtAEcASQBUAC0AUwBFAFIAVgBFAFIABwAIAACfOcZKYNwBAAAAAA=="
+ echo
+ ;;
+"NTLM TlRMTVNTUAAD"*)
+ # Type 3 -> accept without validation
+ exec "$GIT_EXEC_PATH"/git-http-backend
+ ;;
+*)
+ echo "Status: 500 Unrecognized"
+ echo
+ echo "Unhandled auth: '$HTTP_AUTHORIZATION'"
+ ;;
+esac
diff --git a/t/lib-proto-disable.sh b/t/lib-proto-disable.sh
index 890622be81642b..9db481e1be15b2 100644
--- a/t/lib-proto-disable.sh
+++ b/t/lib-proto-disable.sh
@@ -214,7 +214,7 @@ setup_ext_wrapper () {
cd "$TRASH_DIRECTORY/remote" &&
eval "$*"
EOF
- PATH=$TRASH_DIRECTORY:$PATH &&
+ PATH=$TRASH_DIRECTORY$PATH_SEP$PATH &&
export TRASH_DIRECTORY
'
}
diff --git a/t/meson.build b/t/meson.build
index 7528e5cda5fef0..81591f64bf1361 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -6,6 +6,7 @@ clar_test_suites = [
'unit-tests/u-hashmap.c',
'unit-tests/u-list-objects-filter-options.c',
'unit-tests/u-mem-pool.c',
+ 'unit-tests/u-mingw.c',
'unit-tests/u-oid-array.c',
'unit-tests/u-oidmap.c',
'unit-tests/u-oidtree.c',
@@ -274,6 +275,8 @@ integration_tests = [
't2026-checkout-pathspec-file.sh',
't2027-checkout-track.sh',
't2030-unresolve-info.sh',
+ 't2031-checkout-long-paths.sh',
+ 't2040-checkout-symlink-attr.sh',
't2050-git-dir-relative.sh',
't2060-switch.sh',
't2070-restore.sh',
@@ -873,6 +876,7 @@ integration_tests = [
't7105-reset-patch.sh',
't7106-reset-unborn-branch.sh',
't7107-reset-pathspec-file.sh',
+ 't7108-reset-stdin.sh',
't7110-reset-merge.sh',
't7111-reset-table.sh',
't7112-reset-submodule.sh',
@@ -903,6 +907,7 @@ integration_tests = [
't7424-submodule-mixed-ref-formats.sh',
't7425-submodule-gitdir-path-extension.sh',
't7426-submodule-get-default-remote.sh',
+ 't7429-submodule-long-path.sh',
't7450-bad-git-dotfiles.sh',
't7500-commit-template-squash-signoff.sh',
't7501-commit-basic-functionality.sh',
@@ -977,6 +982,7 @@ integration_tests = [
't8014-blame-ignore-fuzzy.sh',
't8015-blame-diff-algorithm.sh',
't8020-last-modified.sh',
+ 't8100-git-survey.sh',
't9001-send-email.sh',
't9002-column.sh',
't9003-help-autocorrect.sh',
diff --git a/t/t0014-alias.sh b/t/t0014-alias.sh
index 68b4903cbfa595..6df1890d4b23e4 100755
--- a/t/t0014-alias.sh
+++ b/t/t0014-alias.sh
@@ -52,10 +52,10 @@ test_expect_success 'looping aliases - deprecated builtins' '
#'
test_expect_success 'run-command formats empty args properly' '
- test_must_fail env GIT_TRACE=1 git frotz a "" b " " c 2>actual.raw &&
- sed -ne "/run_command:/s/.*trace: run_command: //p" actual.raw >actual &&
- echo "git-frotz a '\'''\'' b '\'' '\'' c" >expect &&
- test_cmp expect actual
+ test_must_fail env GIT_TRACE=1 git frotz a "" b " " c 2>actual.raw &&
+ sed -ne "/run_command: git-frotz/s/.*trace: run_command: //p" actual.raw >actual &&
+ echo "git-frotz a '\'''\'' b '\'' '\'' c" >expect &&
+ test_cmp expect actual
'
test_expect_success 'tracing a shell alias with arguments shows trace of prepared command' '
diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh
index f0d50d769e9fc5..0c5975336f2104 100755
--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -8,7 +8,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-terminal.sh
-PATH=$PWD:$PATH
+PATH=$PWD$PATH_SEP$PATH
TEST_ROOT="$(pwd)"
write_script <<\EOF "$TEST_ROOT/rot13.sh"
diff --git a/t/t0060-path-utils.sh b/t/t0060-path-utils.sh
index 8545cdfab559b4..5abfa202c19dca 100755
--- a/t/t0060-path-utils.sh
+++ b/t/t0060-path-utils.sh
@@ -147,25 +147,25 @@ ancestor /foo /fo -1
ancestor /foo /foo -1
ancestor /foo /bar -1
ancestor /foo /foo/bar -1
-ancestor /foo /foo:/bar -1
-ancestor /foo /:/foo:/bar 0
-ancestor /foo /foo:/:/bar 0
-ancestor /foo /:/bar:/foo 0
+ancestor /foo "/foo$PATH_SEP/bar" -1
+ancestor /foo "/$PATH_SEP/foo$PATH_SEP/bar" 0
+ancestor /foo "/foo$PATH_SEP/$PATH_SEP/bar" 0
+ancestor /foo "/$PATH_SEP/bar$PATH_SEP/foo" 0
ancestor /foo/bar / 0
ancestor /foo/bar /fo -1
ancestor /foo/bar /foo 4
ancestor /foo/bar /foo/ba -1
-ancestor /foo/bar /:/fo 0
-ancestor /foo/bar /foo:/foo/ba 4
+ancestor /foo/bar "/$PATH_SEP/fo" 0
+ancestor /foo/bar "/foo$PATH_SEP/foo/ba" 4
ancestor /foo/bar /bar -1
ancestor /foo/bar /fo -1
-ancestor /foo/bar /foo:/bar 4
-ancestor /foo/bar /:/foo:/bar 4
-ancestor /foo/bar /foo:/:/bar 4
-ancestor /foo/bar /:/bar:/fo 0
-ancestor /foo/bar /:/bar 0
+ancestor /foo/bar "/foo$PATH_SEP/bar" 4
+ancestor /foo/bar "/$PATH_SEP/foo$PATH_SEP/bar" 4
+ancestor /foo/bar "/foo$PATH_SEP/$PATH_SEP/bar" 4
+ancestor /foo/bar "/$PATH_SEP/bar$PATH_SEP/fo" 0
+ancestor /foo/bar "/$PATH_SEP/bar" 0
ancestor /foo/bar /foo 4
-ancestor /foo/bar /foo:/bar 4
+ancestor /foo/bar "/foo$PATH_SEP/bar" 4
ancestor /foo/bar /bar -1
# Windows-specific: DOS drives, network shares
@@ -281,6 +281,14 @@ test_expect_success SYMLINKS 'real path works on symlinks' '
test_cmp expect actual
'
+test_expect_success MINGW 'real path works near drive root' '
+ # we need a non-existing path at the drive root; simply skip if C:/xyz exists
+ if test ! -e C:/xyz
+ then
+ test C:/xyz = $(test-tool path-utils real_path C:/xyz)
+ fi
+'
+
test_expect_success SYMLINKS 'prefix_path works with absolute paths to work tree symlinks' '
ln -s target symlink &&
echo "symlink" >expect &&
@@ -602,7 +610,8 @@ test_expect_success !VALGRIND,RUNTIME_PREFIX,CAN_EXEC_IN_PWD 'RUNTIME_PREFIX wor
echo "echo HERE" | write_script pretend/libexec/git-core/git-here &&
GIT_EXEC_PATH= ./pretend/bin/git here >actual &&
echo HERE >expect &&
- test_cmp expect actual'
+ test_cmp expect actual
+'
test_expect_success !VALGRIND,RUNTIME_PREFIX,CAN_EXEC_IN_PWD '%(prefix)/ works' '
git config yes.path "%(prefix)/yes" &&
@@ -611,4 +620,34 @@ test_expect_success !VALGRIND,RUNTIME_PREFIX,CAN_EXEC_IN_PWD '%(prefix)/ works'
test_cmp expect actual
'
+test_expect_success MINGW,RUNTIME_PREFIX 'MSYSTEM/PATH is adjusted if necessary' '
+ if test -z "$MINGW_PREFIX"
+ then
+ MINGW_PREFIX="/$(echo "${MSYSTEM:-MINGW64}" | tr A-Z a-z)"
+ fi &&
+ mkdir -p "$HOME"/bin pretend"$MINGW_PREFIX"/bin \
+ pretend"$MINGW_PREFIX"/libexec/git-core pretend/usr/bin &&
+ cp "$GIT_EXEC_PATH"/git.exe pretend"$MINGW_PREFIX"/bin/ &&
+ cp "$GIT_EXEC_PATH"/git.exe pretend"$MINGW_PREFIX"/libexec/git-core/ &&
+ # copy the .dll files, if any (happens when building via CMake)
+ if test -n "$(ls "$GIT_EXEC_PATH"/*.dll 2>/dev/null)"
+ then
+ cp "$GIT_EXEC_PATH"/*.dll pretend"$MINGW_PREFIX"/bin/ &&
+ cp "$GIT_EXEC_PATH"/*.dll pretend"$MINGW_PREFIX"/libexec/git-core/
+ fi &&
+ echo "env | grep MSYSTEM=" | write_script "$HOME"/bin/git-test-home &&
+ echo "echo ${MINGW_PREFIX#/}" | write_script pretend"$MINGW_PREFIX"/bin/git-test-bin &&
+ echo "echo usr" | write_script pretend/usr/bin/git-test-bin2 &&
+
+ (
+ MSYSTEM= &&
+ GIT_EXEC_PATH= &&
+ pretend"$MINGW_PREFIX"/libexec/git-core/git.exe test-home >actual &&
+ pretend"$MINGW_PREFIX"/libexec/git-core/git.exe test-bin >>actual &&
+ pretend"$MINGW_PREFIX"/bin/git.exe test-bin2 >>actual
+ ) &&
+ test_write_lines MSYSTEM=$MSYSTEM "${MINGW_PREFIX#/}" usr >expect &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh
index 60cfe65979e215..905e90e1f72541 100755
--- a/t/t0061-run-command.sh
+++ b/t/t0061-run-command.sh
@@ -69,7 +69,7 @@ test_expect_success 'run_command does not try to execute a directory' '
cat bin2/greet
EOF
- PATH=$PWD/bin1:$PWD/bin2:$PATH \
+ PATH=$PWD/bin1$PATH_SEP$PWD/bin2$PATH_SEP$PATH \
test-tool run-command run-command greet >actual 2>err &&
test_cmp bin2/greet actual &&
test_must_be_empty err
@@ -86,7 +86,7 @@ test_expect_success POSIXPERM 'run_command passes over non-executable file' '
cat bin2/greet
EOF
- PATH=$PWD/bin1:$PWD/bin2:$PATH \
+ PATH=$PWD/bin1$PATH_SEP$PWD/bin2$PATH_SEP$PATH \
test-tool run-command run-command greet >actual 2>err &&
test_cmp bin2/greet actual &&
test_must_be_empty err
@@ -106,7 +106,7 @@ test_expect_success POSIXPERM,SANITY 'unreadable directory in PATH' '
git config alias.nitfol "!echo frotz" &&
chmod a-rx local-command &&
(
- PATH=./local-command:$PATH &&
+ PATH=./local-command$PATH_SEP$PATH &&
git nitfol >actual
) &&
echo frotz >expect &&
diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
index 07aa834d33e248..e740ce362988a5 100755
--- a/t/t0300-credentials.sh
+++ b/t/t0300-credentials.sh
@@ -80,7 +80,7 @@ test_expect_success 'setup helper scripts' '
printf "username=\\007latrix Lestrange\\n"
EOF
- PATH="$PWD:$PATH"
+ PATH="$PWD$PATH_SEP$PATH"
'
test_expect_success 'credential_fill invokes helper' '
diff --git a/t/t0301-credential-cache.sh b/t/t0301-credential-cache.sh
index 6f7cfd9e33f633..a14032626192d0 100755
--- a/t/t0301-credential-cache.sh
+++ b/t/t0301-credential-cache.sh
@@ -12,7 +12,7 @@ test -z "$NO_UNIX_SOCKETS" || {
if test_have_prereq MINGW
then
service_running=$(sc query afunix | grep "4 RUNNING")
- test -z "$service_running" || {
+ test -n "$service_running" || {
skip_all='skipping credential-cache tests, unix sockets not available'
test_done
}
diff --git a/t/t1007-hash-object.sh b/t/t1007-hash-object.sh
index de076293b62a76..841a6671d1a3c1 100755
--- a/t/t1007-hash-object.sh
+++ b/t/t1007-hash-object.sh
@@ -49,6 +49,9 @@ test_expect_success 'setup' '
example sha1:ddd3f836d3e3fbb7ae289aa9ae83536f76956399
example sha256:b44fe1fe65589848253737db859bd490453510719d7424daab03daf0767b85ae
+
+ large5GB sha1:0be2be10a4c8764f32c4bf372a98edc731a4b204
+ large5GB sha256:dc18ca621300c8d3cfa505a275641ebab00de189859e022a975056882d313e64
EOF
'
@@ -258,4 +261,40 @@ test_expect_success '--stdin outside of repository (uses default hash)' '
test_cmp expect actual
'
+test_expect_success EXPENSIVE,SIZE_T_IS_64BIT,!LONG_IS_64BIT \
+ 'files over 4GB hash literally' '
+ test-tool genzeros $((5*1024*1024*1024)) >big &&
+ test_oid large5GB >expect &&
+ git hash-object --stdin --literally <big >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success EXPENSIVE,SIZE_T_IS_64BIT,!LONG_IS_64BIT \
+ 'files over 4GB hash correctly via --stdin' '
+ { test -f big || test-tool genzeros $((5*1024*1024*1024)) >big; } &&
+ test_oid large5GB >expect &&
+ git hash-object --stdin <big >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success EXPENSIVE,SIZE_T_IS_64BIT,!LONG_IS_64BIT \
+ 'files over 4GB hash correctly' '
+ { test -f big || test-tool genzeros $((5*1024*1024*1024)) >big; } &&
+ test_oid large5GB >expect &&
+ git hash-object -- big >actual &&
+ test_cmp expect actual
+'
+
+# This clean filter does nothing, other than excercising the interface.
+# We ensure that cleaning doesn't mangle large files on 64-bit Windows.
+test_expect_success EXPENSIVE,SIZE_T_IS_64BIT,!LONG_IS_64BIT \
+ 'hash filtered files over 4GB correctly' '
+ { test -f big || test-tool genzeros $((5*1024*1024*1024)) >big; } &&
+ test_oid large5GB >expect &&
+ test_config filter.null-filter.clean "cat" &&
+ echo "big filter=null-filter" >.gitattributes &&
+ git hash-object -- big >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh
index 3a14218b245d4c..529844e2862c74 100755
--- a/t/t1090-sparse-checkout-scope.sh
+++ b/t/t1090-sparse-checkout-scope.sh
@@ -106,4 +106,24 @@ test_expect_success 'in partial clone, sparse checkout only fetches needed blobs
test_cmp expect actual
'
+test_expect_success MINGW 'no unnecessary opendir() with fscache' '
+ git clone . fscache-test &&
+ (
+ cd fscache-test &&
+ git config core.fscache 1 &&
+ echo "/excluded/*" >.git/info/sparse-checkout &&
+ for f in $(test_seq 10)
+ do
+ sha1=$(echo $f | git hash-object -w --stdin) &&
+ git update-index --add \
+ --cacheinfo 100644,$sha1,excluded/$f || exit 1
+ done &&
+ test_tick &&
+ git commit -m excluded &&
+ GIT_TRACE_FSCACHE=1 git status >out 2>err &&
+ grep excluded err >grep.out &&
+ test_line_count = 1 grep.out
+ )
+'
+
test_done
diff --git a/t/t1504-ceiling-dirs.sh b/t/t1504-ceiling-dirs.sh
index e04420f4368b93..ff9fb804827b59 100755
--- a/t/t1504-ceiling-dirs.sh
+++ b/t/t1504-ceiling-dirs.sh
@@ -84,9 +84,9 @@ then
GIT_CEILING_DIRECTORIES="$TRASH_ROOT/top/"
test_fail subdir_ceil_at_top_slash
- GIT_CEILING_DIRECTORIES=":$TRASH_ROOT/top"
+ GIT_CEILING_DIRECTORIES="$PATH_SEP$TRASH_ROOT/top"
test_prefix subdir_ceil_at_top_no_resolve "sub/dir/"
- GIT_CEILING_DIRECTORIES=":$TRASH_ROOT/top/"
+ GIT_CEILING_DIRECTORIES="$PATH_SEP$TRASH_ROOT/top/"
test_prefix subdir_ceil_at_top_slash_no_resolve "sub/dir/"
fi
@@ -116,13 +116,13 @@ GIT_CEILING_DIRECTORIES="$TRASH_ROOT/subdi"
test_prefix subdir_ceil_at_subdi_slash "sub/dir/"
-GIT_CEILING_DIRECTORIES="/foo:$TRASH_ROOT/sub"
+GIT_CEILING_DIRECTORIES="/foo$PATH_SEP$TRASH_ROOT/sub"
test_fail second_of_two
-GIT_CEILING_DIRECTORIES="$TRASH_ROOT/sub:/bar"
+GIT_CEILING_DIRECTORIES="$TRASH_ROOT/sub$PATH_SEP/bar"
test_fail first_of_two
-GIT_CEILING_DIRECTORIES="/foo:$TRASH_ROOT/sub:/bar"
+GIT_CEILING_DIRECTORIES="/foo$PATH_SEP$TRASH_ROOT/sub$PATH_SEP/bar"
test_fail second_of_three
diff --git a/t/t1517-outside-repo.sh b/t/t1517-outside-repo.sh
index c824c1a25cf27e..37371e3f5e3e4c 100755
--- a/t/t1517-outside-repo.sh
+++ b/t/t1517-outside-repo.sh
@@ -120,7 +120,7 @@ do
merge-octopus | merge-one-file | merge-resolve | mergetool | \
mktag | p4 | p4.py | pickaxe | remote-ftp | remote-ftps | \
remote-http | remote-https | replay | send-email | \
- sh-i18n--envsubst | shell | show | stage | submodule | svn | \
+ sh-i18n--envsubst | shell | show | stage | submodule | survey | svn | \
upload-archive--writer | upload-pack | web--browse | whatchanged)
expect_outcome=expect_failure ;;
*)
diff --git a/t/t2031-checkout-long-paths.sh b/t/t2031-checkout-long-paths.sh
new file mode 100755
index 00000000000000..15416a1d6ee8c7
--- /dev/null
+++ b/t/t2031-checkout-long-paths.sh
@@ -0,0 +1,111 @@
+#!/bin/sh
+
+test_description='checkout long paths on Windows
+
+Ensures that Git for Windows can deal with long paths (>260) enabled via core.longpaths'
+
+. ./test-lib.sh
+
+if test_have_prereq !MINGW
+then
+ skip_all='skipping MINGW specific long paths test'
+ test_done
+fi
+
+test_expect_success setup '
+ p=longpathxx && # -> 10
+ p=$p$p$p$p$p && # -> 50
+ p=$p$p$p$p$p && # -> 250
+
+ path=${p}/longtestfile && # -> 263 (MAX_PATH = 260)
+
+ blob=$(echo foobar | git hash-object -w --stdin) &&
+
+ printf "100644 %s 0\t%s\n" "$blob" "$path" |
+ git update-index --add --index-info &&
+ git commit -m initial -q
+'
+
+test_expect_success 'checkout of long paths without core.longpaths fails' '
+ git config core.longpaths false &&
+ test_must_fail git checkout -f 2>error &&
+ grep -q "Filename too long" error &&
+ test ! -d longpa*
+'
+
+test_expect_success 'checkout of long paths with core.longpaths works' '
+ git config core.longpaths true &&
+ git checkout -f &&
+ test_path_is_file longpa*/longtestfile
+'
+
+test_expect_success 'update of long paths' '
+ echo frotz >>$(ls longpa*/longtestfile) &&
+ echo $path > expect &&
+ git ls-files -m > actual &&
+ test_cmp expect actual &&
+ git add $path &&
+ git commit -m second &&
+ git grep "frotz" HEAD -- $path
+'
+
+test_expect_success cleanup '
+ # bash cannot delete the trash dir if it contains a long path
+ # lets help cleaning up (unless in debug mode)
+ if test -z "$debug"
+ then
+ rm -rf longpa~1
+ fi
+'
+
+# check that the template used in the test won't be too long:
+abspath="$(pwd)"/testdir
+test ${#abspath} -gt 230 ||
+test_set_prereq SHORTABSPATH
+
+test_expect_success SHORTABSPATH 'clean up path close to MAX_PATH' '
+ p=/123456789abcdef/123456789abcdef/123456789abcdef/123456789abc/ef &&
+ p=y$p$p$p$p &&
+ subdir="x$(echo "$p" | tail -c $((253 - ${#abspath})) - )" &&
+ # Now, $abspath/$subdir has exactly 254 characters, and is inside CWD
+ p2="$abspath/$subdir" &&
+ test 254 = ${#p2} &&
+
+ # Be careful to overcome path limitations of the MSys tools and split
+ # the $subdir into two parts. ($subdir2 has to contain 16 chars and a
+ # slash somewhere following; that is why we asked for abspath <= 230 and
+ # why we placed a slash near the end of the $subdir template.)
+ subdir2=${subdir#????????????????*/} &&
+ subdir1=testdir/${subdir%/$subdir2} &&
+ mkdir -p "$subdir1" &&
+ i=0 &&
+ # The most important case is when absolute path is 258 characters long,
+ # and that will be when i == 4.
+ while test $i -le 7
+ do
+ mkdir -p $subdir2 &&
+ touch $subdir2/one-file &&
+ mv ${subdir2%%/*} "$subdir1/" &&
+ subdir2=z${subdir2} &&
+ i=$(($i+1)) ||
+ exit 1
+ done &&
+
+ # now check that git is able to clear the tree:
+ (cd testdir &&
+ git init &&
+ git config core.longpaths yes &&
+ git clean -fdx) &&
+ test ! -d "$subdir1"
+'
+
+test_expect_success SYMLINKS_WINDOWS 'leave drive-less, short paths intact' '
+ printf "/Program Files" >symlink-target &&
+ symlink_target_oid="$(git hash-object -w --stdin <symlink-target)" &&
+ git update-index --add --cacheinfo "120000,$symlink_target_oid,PF" &&
+ git checkout-index -f PF &&
+ cmd //c dir >actual &&
+ grep " *PF *\\[\\\\Program Files\\]" actual
+'
+
+test_done
diff --git a/t/t2040-checkout-symlink-attr.sh b/t/t2040-checkout-symlink-attr.sh
new file mode 100755
index 00000000000000..e00c31d096ce88
--- /dev/null
+++ b/t/t2040-checkout-symlink-attr.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+test_description='checkout symlinks with `symlink` attribute on Windows
+
+Ensures that Git for Windows creates symlinks of the right type,
+as specified by the `symlink` attribute in `.gitattributes`.'
+
+# Tell MSYS to create native symlinks. Without this flag test-lib's
+# prerequisite detection for SYMLINKS doesn't detect the right thing.
+MSYS=winsymlinks:nativestrict && export MSYS
+
+. ./test-lib.sh
+
+if ! test_have_prereq MINGW,SYMLINKS
+then
+ skip_all='skipping $0: MinGW-only test, which requires symlink support.'
+ test_done
+fi
+
+# Adds a symlink to the index without clobbering the work tree.
+cache_symlink () {
+ sha=$(printf '%s' "$1" | git hash-object --stdin -w) &&
+ git update-index --add --cacheinfo 120000,$sha,"$2"
+}
+
+test_expect_success 'checkout symlinks with attr' '
+ cache_symlink file1 file-link &&
+ cache_symlink dir dir-link &&
+
+ printf "file-link symlink=file\ndir-link symlink=dir\n" >.gitattributes &&
+ git add .gitattributes &&
+
+ git checkout . &&
+
+ mkdir dir &&
+ echo "[a]b=c" >file1 &&
+ echo "[x]y=z" >dir/file2 &&
+
+ # MSYS2 is very forgiving, it will resolve symlinks even if the
+ # symlink type is incorrect. To make this test meaningful, try
+ # them with a native, non-MSYS executable, such as `git config`.
+ test "$(git config -f file-link a.b)" = "c" &&
+ test "$(git config -f dir-link/file2 x.y)" = "z"
+'
+
+test_done
diff --git a/t/t2300-cd-to-toplevel.sh b/t/t2300-cd-to-toplevel.sh
index c8de6d8a190220..91f523d5198d8d 100755
--- a/t/t2300-cd-to-toplevel.sh
+++ b/t/t2300-cd-to-toplevel.sh
@@ -16,7 +16,7 @@ test_cd_to_toplevel () {
test_expect_success $3 "$2" '
(
cd '"'$1'"' &&
- PATH="$EXEC_PATH:$PATH" &&
+ PATH="$EXEC_PATH$PATH_SEP$PATH" &&
. git-sh-setup &&
cd_to_toplevel &&
[ "$(pwd -P)" = "$TOPLEVEL" ]
diff --git a/t/t2403-worktree-move.sh b/t/t2403-worktree-move.sh
index 0bb33e8b1b90fb..56faef26aa3bb1 100755
--- a/t/t2403-worktree-move.sh
+++ b/t/t2403-worktree-move.sh
@@ -271,4 +271,13 @@ test_expect_success 'move worktree with relative path to absolute path' '
test_cmp expect .git/worktrees/absolute/gitdir
'
+test_expect_success MINGW 'worktree remove does not traverse mount points' '
+ mkdir target &&
+ >target/dont-remove-me &&
+ git worktree add --detach wt-junction &&
+ cmd //c "mklink /j wt-junction\\mnt target" &&
+ git worktree remove --force wt-junction &&
+ test_path_is_file target/dont-remove-me
+'
+
test_done
diff --git a/t/t3307-notes-man.sh b/t/t3307-notes-man.sh
index 1aa366a410e9a3..7e5c06e6615d7a 100755
--- a/t/t3307-notes-man.sh
+++ b/t/t3307-notes-man.sh
@@ -26,7 +26,7 @@ test_expect_success 'example 1: notes to add an Acked-by line' '
'
test_expect_success 'example 2: binary notes' '
- cp "$TEST_DIRECTORY"/test-binary-1.png . &&
+ cp "$TEST_DIRECTORY"/lib-diff/test-binary-1.png . &&
git checkout B &&
blob=$(git hash-object -w test-binary-1.png) &&
git notes --ref=logo add -C "$blob" &&
diff --git a/t/t3418-rebase-continue.sh b/t/t3418-rebase-continue.sh
index f9b8999db50f1b..e03a28c0aaad24 100755
--- a/t/t3418-rebase-continue.sh
+++ b/t/t3418-rebase-continue.sh
@@ -82,7 +82,7 @@ test_expect_success 'rebase --continue remembers merge strategy and options' '
rm -f actual &&
(
- PATH=./test-bin:$PATH &&
+ PATH=./test-bin$PATH_SEP$PATH &&
test_must_fail git rebase -s funny -X"option=arg with space" \
-Xop\"tion\\ -X"new${LF}line " main topic
) &&
@@ -91,7 +91,7 @@ test_expect_success 'rebase --continue remembers merge strategy and options' '
echo "Resolved" >F2 &&
git add F2 &&
(
- PATH=./test-bin:$PATH &&
+ PATH=./test-bin$PATH_SEP$PATH &&
git rebase --continue
) &&
test_cmp expect actual
diff --git a/t/t3700-add.sh b/t/t3700-add.sh
index 2947bf9a6b1404..b9495e5cf00724 100755
--- a/t/t3700-add.sh
+++ b/t/t3700-add.sh
@@ -587,4 +587,15 @@ test_expect_success CASE_INSENSITIVE_FS 'path is case-insensitive' '
git add "$downcased"
'
+test_expect_success MINGW 'can add files via NTFS junctions' '
+ test_when_finished "cmd //c rmdir junction && rm -rf target" &&
+ test_create_repo target &&
+ cmd //c "mklink /j junction target" &&
+ >target/via-junction &&
+ git -C junction add "$(pwd)/junction/via-junction" &&
+ echo via-junction >expect &&
+ git -C target diff --cached --name-only >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index 6e120a40011238..cb09158c214768 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -1204,6 +1204,27 @@ test_expect_success 'checkout -p patch editing of added file' '
)
'
+test_expect_success EXPENSIVE 'add -i with a lot of files' '
+ git reset --hard &&
+ x160=0123456789012345678901234567890123456789 &&
+ x160=$x160$x160$x160$x160 &&
+ y= &&
+ i=0 &&
+ while test $i -le 200
+ do
+ name=$(printf "%s%03d" $x160 $i) &&
+ echo $name >$name &&
+ git add -N $name &&
+ y="${y}y$LF" &&
+ i=$(($i+1)) ||
+ exit 1
+ done &&
+ echo "$y" | git add -p -- . &&
+ git diff --cached >staged &&
+ test_line_count = 1407 staged &&
+ git reset --hard
+'
+
test_expect_success 'show help from add--helper' '
git reset --hard &&
cat >expect <<-EOF &&
diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh
index 70879941c22f8c..0c9022290fad0f 100755
--- a/t/t3903-stash.sh
+++ b/t/t3903-stash.sh
@@ -1377,7 +1377,7 @@ test_expect_success 'stash -- works with binary files' '
mkdir -p subdir &&
>subdir/untracked &&
>subdir/tracked &&
- cp "$TEST_DIRECTORY"/test-binary-1.png subdir/tracked-binary &&
+ cp "$TEST_DIRECTORY"/lib-diff/test-binary-1.png subdir/tracked-binary &&
git add subdir/tracked* &&
git stash -- subdir/ &&
test_path_is_missing subdir/tracked &&
diff --git a/t/t4012-diff-binary.sh b/t/t4012-diff-binary.sh
index 97b5ac04071d36..0fb50d2ffc91d9 100755
--- a/t/t4012-diff-binary.sh
+++ b/t/t4012-diff-binary.sh
@@ -19,7 +19,7 @@ test_expect_success 'prepare repository' '
echo AIT >a && echo BIT >b && echo CIT >c && echo DIT >d &&
git update-index --add a b c d &&
echo git >a &&
- cat "$TEST_DIRECTORY"/test-binary-1.png >b &&
+ cat "$TEST_DIRECTORY"/lib-diff/test-binary-1.png >b &&
echo git >c &&
cat b b >d
'
diff --git a/t/t4049-diff-stat-count.sh b/t/t4049-diff-stat-count.sh
index eceb47c8594416..2161a1e8cf5ba6 100755
--- a/t/t4049-diff-stat-count.sh
+++ b/t/t4049-diff-stat-count.sh
@@ -33,7 +33,7 @@ test_expect_success 'binary changes do not count in lines' '
git reset --hard &&
echo a >a &&
echo c >c &&
- cat "$TEST_DIRECTORY"/test-binary-1.png >d &&
+ cat "$TEST_DIRECTORY"/lib-diff/test-binary-1.png >d &&
cat >expect <<-\EOF &&
a | 1 +
c | 1 +
diff --git a/t/t4108-apply-threeway.sh b/t/t4108-apply-threeway.sh
index f30e85659dbb87..7f84edd9653a7d 100755
--- a/t/t4108-apply-threeway.sh
+++ b/t/t4108-apply-threeway.sh
@@ -272,11 +272,11 @@ test_expect_success 'apply with --3way --cached and conflicts' '
test_expect_success 'apply binary file patch' '
git reset --hard main &&
- cp "$TEST_DIRECTORY/test-binary-1.png" bin.png &&
+ cp "$TEST_DIRECTORY/lib-diff/test-binary-1.png" bin.png &&
git add bin.png &&
git commit -m "add binary file" &&
- cp "$TEST_DIRECTORY/test-binary-2.png" bin.png &&
+ cp "$TEST_DIRECTORY/lib-diff/test-binary-2.png" bin.png &&
git diff --binary >bin.diff &&
git reset --hard &&
@@ -287,11 +287,11 @@ test_expect_success 'apply binary file patch' '
test_expect_success 'apply binary file patch with 3way' '
git reset --hard main &&
- cp "$TEST_DIRECTORY/test-binary-1.png" bin.png &&
+ cp "$TEST_DIRECTORY/lib-diff/test-binary-1.png" bin.png &&
git add bin.png &&
git commit -m "add binary file" &&
- cp "$TEST_DIRECTORY/test-binary-2.png" bin.png &&
+ cp "$TEST_DIRECTORY/lib-diff/test-binary-2.png" bin.png &&
git diff --binary >bin.diff &&
git reset --hard &&
@@ -302,11 +302,11 @@ test_expect_success 'apply binary file patch with 3way' '
test_expect_success 'apply full-index patch with 3way' '
git reset --hard main &&
- cp "$TEST_DIRECTORY/test-binary-1.png" bin.png &&
+ cp "$TEST_DIRECTORY/lib-diff/test-binary-1.png" bin.png &&
git add bin.png &&
git commit -m "add binary file" &&
- cp "$TEST_DIRECTORY/test-binary-2.png" bin.png &&
+ cp "$TEST_DIRECTORY/lib-diff/test-binary-2.png" bin.png &&
git diff --full-index >bin.diff &&
git reset --hard &&
diff --git a/t/t5003-archive-zip.sh b/t/t5003-archive-zip.sh
index c8c1c5c06b6037..8f2a2cbc6b8103 100755
--- a/t/t5003-archive-zip.sh
+++ b/t/t5003-archive-zip.sh
@@ -88,7 +88,7 @@ test_expect_success \
'mkdir a &&
echo simple textfile >a/a &&
mkdir a/bin &&
- cp /bin/sh a/bin &&
+ cp "$TEST_DIRECTORY/lib-diff/test-binary-1.png" a/bin &&
printf "text\r" >a/text.cr &&
printf "text\r\n" >a/text.crlf &&
printf "text\n" >a/text.lf &&
diff --git a/t/t5409-colorize-remote-messages.sh b/t/t5409-colorize-remote-messages.sh
index fa5de4500a4f50..a755c49a74e634 100755
--- a/t/t5409-colorize-remote-messages.sh
+++ b/t/t5409-colorize-remote-messages.sh
@@ -98,4 +98,34 @@ test_expect_success 'fallback to color.ui' '
grep "error: error" decoded
'
+test_expect_success 'disallow (color) control sequences in sideband' '
+ write_script .git/color-me-surprised <<-\EOF &&
+ printf "error: Have you \\033[31mread\\033[m this?\\a\\n" >&2
+ exec "$@"
+ EOF
+ test_config_global uploadPack.packObjectshook ./color-me-surprised &&
+ test_commit need-at-least-one-commit &&
+
+ git clone --no-local . throw-away 2>stderr &&
+ test_decode_color decoded &&
+ test_grep RED decoded &&
+ test_grep "\\^G" stderr &&
+ tr -dc "\\007" actual &&
+ test_must_be_empty actual &&
+
+ rm -rf throw-away &&
+ git -c sideband.allowControlCharacters=false \
+ clone --no-local . throw-away 2>stderr &&
+ test_decode_color