forked from Mirrors/helix
commit 6ae0186ef1
@@ -1,3 +1,3 @@
|
||||
[alias]
|
||||
xtask = "run --package xtask --"
|
||||
integration-test = "test --features integration --workspace --test integration"
|
||||
integration-test = "test --features integration --profile integration --workspace --test integration"
|
||||
|
@@ -1,3 +0,0 @@
|
||||
[toolchain]
|
||||
channel = "1.61.0"
|
||||
components = ["rustfmt", "rust-src"]
|
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
|
||||
# Commands
|
||||
|
||||
Command mode can be activated by pressing `:`, similar to Vim. Built-in commands:
|
||||
Command mode can be activated by pressing `:`. The built-in commands are:
|
||||
|
||||
{{#include ./generated/typable-cmd.md}}
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Guides
|
||||
|
||||
This section contains guides for adding new language server configurations,
|
||||
tree-sitter grammars, textobject queries, etc.
|
||||
tree-sitter grammars, textobject queries, and other similar items.
|
||||
|
@@ -1,45 +1,52 @@
|
||||
# Adding languages
|
||||
# Adding new languages to Helix
|
||||
|
||||
In order to add a new language to Helix, you will need to follow the steps
|
||||
below.
|
||||
|
||||
## Language configuration
|
||||
|
||||
To add a new language, you need to add a `[[language]]` entry to the
|
||||
`languages.toml` (see the [language configuration section]).
|
||||
1. Add a new `[[language]]` entry in the `languages.toml` file and provide the
|
||||
necessary configuration for the new language. For more information on
|
||||
language configuration, refer to the
|
||||
[language configuration section](../languages.md) of the documentation.
|
||||
2. If you are adding a new language or updating an existing language server
|
||||
configuration, run the command `cargo xtask docgen` to update the
|
||||
[Language Support](../lang-support.md) documentation.
|
||||
|
||||
When adding a new language or Language Server configuration for an existing
|
||||
language, run `cargo xtask docgen` to add the new configuration to the
|
||||
[Language Support][lang-support] docs before creating a pull request.
|
||||
When adding a Language Server configuration, be sure to update the
|
||||
[Language Server Wiki][install-lsp-wiki] with installation notes.
|
||||
> 💡 If you are adding a new Language Server configuration, make sure to update
|
||||
> the
|
||||
> [Language Server Wiki](https://github.com/helix-editor/helix/wiki/How-to-install-the-default-language-servers)
|
||||
> with the installation instructions.
|
||||
|
||||
## Grammar configuration
|
||||
|
||||
If a tree-sitter grammar is available for the language, add a new `[[grammar]]`
|
||||
entry to `languages.toml`.
|
||||
|
||||
You may use the `source.path` key rather than `source.git` with an absolute path
|
||||
to a locally available grammar for testing, but switch to `source.git` before
|
||||
submitting a pull request.
|
||||
1. If a tree-sitter grammar is available for the new language, add a new
|
||||
`[[grammar]]` entry to the `languages.toml` file.
|
||||
2. If you are testing the grammar locally, you can use the `source.path` key
|
||||
with an absolute path to the grammar. However, before submitting a pull
|
||||
request, make sure to switch to using `source.git`.
|
||||
|
||||
## Queries
|
||||
|
||||
For a language to have syntax-highlighting and indentation among
|
||||
other things, you have to add queries. Add a directory for your
|
||||
language with the path `runtime/queries/<name>/`. The tree-sitter
|
||||
[website](https://tree-sitter.github.io/tree-sitter/syntax-highlighting#queries)
|
||||
gives more info on how to write queries.
|
||||
|
||||
> NOTE: When evaluating queries, the first matching query takes
|
||||
precedence, which is different from other editors like Neovim where
|
||||
the last matching query supersedes the ones before it. See
|
||||
[this issue][neovim-query-precedence] for an example.
|
||||
|
||||
## Common Issues
|
||||
|
||||
- If you get errors when running after switching branches, you may have to update the tree-sitter grammars. Run `hx --grammar fetch` to fetch the grammars and `hx --grammar build` to build any out-of-date grammars.
|
||||
|
||||
- If a parser is segfaulting or you want to remove the parser, make sure to remove the compiled parser in `runtime/grammar/<name>.so`
|
||||
|
||||
[language configuration section]: ../languages.md
|
||||
[neovim-query-precedence]: https://github.com/helix-editor/helix/pull/1170#issuecomment-997294090
|
||||
[install-lsp-wiki]: https://github.com/helix-editor/helix/wiki/How-to-install-the-default-language-servers
|
||||
[lang-support]: ../lang-support.md
|
||||
1. In order to provide syntax highlighting and indentation for the new language,
|
||||
you will need to add queries.
|
||||
2. Create a new directory for the language with the path
|
||||
`runtime/queries/<name>/`.
|
||||
3. Refer to the
|
||||
[tree-sitter website](https://tree-sitter.github.io/tree-sitter/syntax-highlighting#queries)
|
||||
for more information on writing queries.
|
||||
|
||||
> 💡 In Helix, the first matching query takes precedence when evaluating
|
||||
> queries, which is different from other editors such as Neovim where the last
|
||||
> matching query supersedes the ones before it. See
|
||||
> [this issue](https://github.com/helix-editor/helix/pull/1170#issuecomment-997294090)
|
||||
> for an example.
|
||||
|
||||
## Common issues
|
||||
|
||||
- If you encounter errors when running Helix after switching branches, you may
|
||||
need to update the tree-sitter grammars. Run the command `hx --grammar fetch`
|
||||
to fetch the grammars and `hx --grammar build` to build any out-of-date
|
||||
grammars.
|
||||
- If a parser is causing a segfault, or you want to remove it, make sure to
|
||||
remove the compiled parser located at `runtime/grammars/<name>.so`.
|
||||
|
@@ -1,171 +1,250 @@
|
||||
# Installation
|
||||
|
||||
We provide pre-built binaries on the [GitHub Releases page](https://github.com/helix-editor/helix/releases).
|
||||
# Installing Helix
|
||||
|
||||
<!--toc:start-->
|
||||
- [Pre-built binaries](#pre-built-binaries)
|
||||
- [Linux, macOS, Windows and OpenBSD packaging status](#linux-macos-windows-and-openbsd-packaging-status)
|
||||
- [Linux](#linux)
|
||||
- [Ubuntu](#ubuntu)
|
||||
- [Fedora/RHEL](#fedorarhel)
|
||||
- [Arch Linux community](#arch-linux-community)
|
||||
- [NixOS](#nixos)
|
||||
- [AppImage](#appimage)
|
||||
- [macOS](#macos)
|
||||
- [Homebrew Core](#homebrew-core)
|
||||
- [Windows](#windows)
|
||||
- [Scoop](#scoop)
|
||||
- [Chocolatey](#chocolatey)
|
||||
- [MSYS2](#msys2)
|
||||
- [Building from source](#building-from-source)
|
||||
- [Configuring Helix's runtime files](#configuring-helixs-runtime-files)
|
||||
- [Validating the installation](#validating-the-installation)
|
||||
- [Configure the desktop shortcut](#configure-the-desktop-shortcut)
|
||||
<!--toc:end-->
|
||||
|
||||
To install Helix, follow the instructions specific to your operating system.
|
||||
Note that:
|
||||
|
||||
- To get the latest nightly version of Helix, you need to
|
||||
[build from source](#building-from-source).
|
||||
|
||||
- To take full advantage of Helix, install the language servers for your
|
||||
preferred programming languages. See the
|
||||
[wiki](https://github.com/helix-editor/helix/wiki/How-to-install-the-default-language-servers)
|
||||
for instructions.
|
||||
|
||||
## Pre-built binaries
|
||||
|
||||
Download pre-built binaries from the
|
||||
[GitHub Releases page](https://github.com/helix-editor/helix/releases). Add the binary to your system's `$PATH` to use it from the command
|
||||
line.
|
||||
|
||||
## Linux, macOS, Windows and OpenBSD packaging status
|
||||
|
||||
Helix is available for Linux, macOS and Windows via the official repositories listed below.
|
||||
|
||||
[![Packaging status](https://repology.org/badge/vertical-allrepos/helix.svg)](https://repology.org/project/helix/versions)
|
||||
|
||||
## OSX
|
||||
## Linux
|
||||
|
||||
Helix is available in homebrew-core:
|
||||
The following third-party repositories are available:
|
||||
|
||||
```
|
||||
brew install helix
|
||||
### Ubuntu
|
||||
|
||||
Helix is available via [Maveonair's PPA](https://launchpad.net/~maveonair/+archive/ubuntu/helix-editor):
|
||||
|
||||
```sh
|
||||
sudo add-apt-repository ppa:maveonair/helix-editor
|
||||
sudo apt update
|
||||
sudo apt install helix
|
||||
```
|
||||
|
||||
## Linux
|
||||
### Fedora/RHEL
|
||||
|
||||
### NixOS
|
||||
Helix is available via `copr`:
|
||||
|
||||
```sh
|
||||
sudo dnf copr enable varlad/helix
|
||||
sudo dnf install helix
|
||||
```
|
||||
|
||||
A [flake](https://nixos.wiki/wiki/Flakes) containing the package is available in
|
||||
the project root. The flake can also be used to spin up a reproducible development
|
||||
shell for working on Helix with `nix develop`.
|
||||
### Arch Linux community
|
||||
|
||||
Flake outputs are cached for each push to master using
|
||||
[Cachix](https://www.cachix.org/). The flake is configured to
|
||||
automatically make use of this cache assuming the user accepts
|
||||
the new settings on first use.
|
||||
Releases are available in the `community` repository:
|
||||
|
||||
If you are using a version of Nix without flakes enabled you can
|
||||
[install Cachix cli](https://docs.cachix.org/installation); `cachix use helix` will
|
||||
configure Nix to use cached outputs when possible.
|
||||
```sh
|
||||
sudo pacman -S helix
|
||||
```
|
||||
Additionally, a [helix-git](https://aur.archlinux.org/packages/helix-git/) package is available
|
||||
in the AUR, which builds the master branch.
|
||||
|
||||
### Arch Linux
|
||||
### NixOS
|
||||
|
||||
Releases are available in the `community` repository.
|
||||
Helix is available as a [flake](https://nixos.wiki/wiki/Flakes) in the project
|
||||
root. Use `nix develop` to spin up a reproducible development shell. Outputs are
|
||||
cached for each push to master using [Cachix](https://www.cachix.org/). The
|
||||
flake is configured to automatically make use of this cache assuming the user
|
||||
accepts the new settings on first use.
|
||||
|
||||
A [helix-git](https://aur.archlinux.org/packages/helix-git/) package is also available on the AUR, which builds the master branch.
|
||||
If you are using a version of Nix without flakes enabled,
|
||||
[install Cachix CLI](https://docs.cachix.org/installation) and use
|
||||
`cachix use helix` to configure Nix to use cached outputs when possible.
|
||||
|
||||
### Fedora Linux
|
||||
### AppImage
|
||||
|
||||
You can install the COPR package for Helix via
|
||||
Install Helix using [AppImage](https://appimage.org/).
|
||||
Download Helix AppImage from the [latest releases](https://github.com/helix-editor/helix/releases/latest) page.
|
||||
|
||||
```sh
|
||||
chmod +x helix-*.AppImage # change permission for executable mode
|
||||
./helix-*.AppImage # run helix
|
||||
```
|
||||
sudo dnf copr enable varlad/helix
|
||||
sudo dnf install helix
|
||||
```
|
||||
|
||||
## macOS
|
||||
|
||||
### Void Linux
|
||||
### Homebrew Core
|
||||
|
||||
```
|
||||
sudo xbps-install helix
|
||||
```sh
|
||||
brew install helix
|
||||
```
|
||||
|
||||
## Windows
|
||||
|
||||
Helix can be installed using [Scoop](https://scoop.sh/), [Chocolatey](https://chocolatey.org/)
|
||||
Install on Windows using [Scoop](https://scoop.sh/), [Chocolatey](https://chocolatey.org/)
|
||||
or [MSYS2](https://msys2.org/).
|
||||
|
||||
**Scoop:**
|
||||
### Scoop
|
||||
|
||||
```
|
||||
```sh
|
||||
scoop install helix
|
||||
```
|
||||
|
||||
**Chocolatey:**
|
||||
### Chocolatey
|
||||
|
||||
```
|
||||
```sh
|
||||
choco install helix
|
||||
```
|
||||
|
||||
**MSYS2:**
|
||||
|
||||
Choose the proper command for your system from below:
|
||||
### MSYS2
|
||||
|
||||
- For 32 bit Windows 7 or above:
|
||||
For 64-bit Windows 8.1 or above:
|
||||
|
||||
```
|
||||
pacman -S mingw-w64-i686-helix
|
||||
```sh
|
||||
pacman -S mingw-w64-ucrt-x86_64-helix
|
||||
```
|
||||
|
||||
- For 64 bit Windows 7 or above:
|
||||
## Building from source
|
||||
|
||||
```
|
||||
pacman -S mingw-w64-x86_64-helix
|
||||
```
|
||||
Clone the repository:
|
||||
|
||||
- For 64 bit Windows 8.1 or above:
|
||||
|
||||
```
|
||||
pacman -S mingw-w64-ucrt-x86_64-helix
|
||||
```sh
|
||||
git clone https://github.com/helix-editor/helix
|
||||
cd helix
|
||||
```
|
||||
|
||||
## Build from source
|
||||
Compile from source:
|
||||
|
||||
```
|
||||
git clone https://github.com/helix-editor/helix
|
||||
cd helix
|
||||
```sh
|
||||
cargo install --path helix-term --locked
|
||||
```
|
||||
|
||||
This will install the `hx` binary to `$HOME/.cargo/bin` and build tree-sitter grammars in `./runtime/grammars`.
|
||||
This command will create the `hx` executable and construct the tree-sitter
|
||||
grammars in the local `runtime` folder. Building the tree-sitter grammars requires
|
||||
a C++ compiler to be installed, for example `gcc-c++`.
|
||||
|
||||
> 💡 If you are using the musl-libc instead of glibc the following environment variable must be set during the build
|
||||
> to ensure tree-sitter grammars can be loaded correctly:
|
||||
>
|
||||
> ```sh
|
||||
> RUSTFLAGS="-C target-feature=-crt-static"
|
||||
> ```
|
||||
|
||||
Helix also needs its runtime files so make sure to copy/symlink the `runtime/` directory into the
|
||||
config directory (for example `~/.config/helix/runtime` on Linux/macOS). This location can be overridden
|
||||
via the `HELIX_RUNTIME` environment variable.
|
||||
> 💡 Tree-sitter grammars can be fetched and compiled if not pre-packaged. Fetch
|
||||
> grammars with `hx --grammar fetch` (requires `git`) and compile them with
|
||||
> `hx --grammar build` (requires a C++ compiler). This will install them in
|
||||
> the `runtime` directory within the user's helix config directory (more
|
||||
> [details below](#multiple-runtime-directories)).
|
||||
|
||||
| OS | Command |
|
||||
| -------------------- | ------------------------------------------------ |
|
||||
| Windows (Cmd) | `xcopy /e /i runtime %AppData%\helix\runtime` |
|
||||
| Windows (PowerShell) | `xcopy /e /i runtime $Env:AppData\helix\runtime` |
|
||||
| Linux / macOS | `ln -s $PWD/runtime ~/.config/helix/runtime` |
|
||||
### Configuring Helix's runtime files
|
||||
|
||||
Starting with Windows Vista you can also create symbolic links on Windows. Note that this requires
|
||||
elevated privileges - i.e. PowerShell or Cmd must be run as administrator.
|
||||
#### Linux and macOS
|
||||
|
||||
**PowerShell:**
|
||||
Either set the `HELIX_RUNTIME` environment variable to point to the runtime files and add it to your `~/.bashrc` or equivalent:
|
||||
|
||||
```powershell
|
||||
New-Item -ItemType SymbolicLink -Target "runtime" -Path "$Env:AppData\helix\runtime"
|
||||
```sh
|
||||
HELIX_RUNTIME=/home/user-name/src/helix/runtime
|
||||
```
|
||||
|
||||
**Cmd:**
|
||||
Or, create a symlink in `~/.config/helix` that links to the source code directory:
|
||||
|
||||
```cmd
|
||||
cd %appdata%\helix
|
||||
mklink /D runtime "<helix-repo>\runtime"
|
||||
```sh
|
||||
ln -s $PWD/runtime ~/.config/helix/runtime
|
||||
```
|
||||
|
||||
The runtime location can be overridden via the `HELIX_RUNTIME` environment variable.
|
||||
#### Windows
|
||||
|
||||
> NOTE: if `HELIX_RUNTIME` is set prior to calling `cargo install --path helix-term --locked`,
|
||||
> tree-sitter grammars will be built in `$HELIX_RUNTIME/grammars`.
|
||||
Either set the `HELIX_RUNTIME` environment variable to point to the runtime files using the Windows setting (search for
|
||||
`Edit environment variables for your account`) or use the `setx` command in
|
||||
Cmd:
|
||||
|
||||
If you plan on keeping the repo locally, an alternative to copying/symlinking
|
||||
runtime files is to set `HELIX_RUNTIME=/path/to/helix/runtime`
|
||||
(`HELIX_RUNTIME=$PWD/runtime` if you're in the helix repo directory).
|
||||
```sh
|
||||
setx HELIX_RUNTIME "%userprofile%\source\repos\helix\runtime"
|
||||
```
|
||||
|
||||
To use Helix in desktop environments that supports [XDG desktop menu](https://specifications.freedesktop.org/menu-spec/menu-spec-latest.html), including Gnome and KDE, copy the provided `.desktop` file to the correct folder:
|
||||
> 💡 `%userprofile%` resolves to your user directory, for example
|
||||
> `C:\Users\Your-Name\`.
|
||||
|
||||
```bash
|
||||
cp contrib/Helix.desktop ~/.local/share/applications
|
||||
```
|
||||
Or, create a symlink in `%appdata%\helix\` that links to the source code directory:
|
||||
|
||||
To use another terminal than the default, you will need to modify the `.desktop` file. For example, to use `kitty`:
|
||||
| Method | Command |
|
||||
| ---------- | -------------------------------------------------------------------------------------- |
|
||||
| PowerShell | `New-Item -ItemType Junction -Target "runtime" -Path "$Env:AppData\helix\runtime"` |
|
||||
| Cmd | `cd %appdata%\helix` <br/> `mklink /D runtime "%userprofile%\src\helix\runtime"` |
|
||||
|
||||
```bash
|
||||
sed -i "s|Exec=hx %F|Exec=kitty hx %F|g" ~/.local/share/applications/Helix.desktop
|
||||
sed -i "s|Terminal=true|Terminal=false|g" ~/.local/share/applications/Helix.desktop
|
||||
```
|
||||
> 💡 On Windows, creating a symbolic link may require running PowerShell or
|
||||
> Cmd as an administrator.
|
||||
|
||||
Please note: there is no icon for Helix yet, so the system default will be used.
|
||||
#### Multiple runtime directories
|
||||
|
||||
## Finishing up the installation
|
||||
When Helix finds multiple runtime directories it will search through them for files in the
|
||||
following order:
|
||||
|
||||
To make sure everything is set up as expected you should finally run the helix healthcheck via
|
||||
1. `runtime/` sibling directory of `$CARGO_MANIFEST_DIR` (this is intended for
|
||||
developing and testing Helix only).
|
||||
2. `runtime/` subdirectory of OS-dependent helix user config directory.
|
||||
3. `$HELIX_RUNTIME`.
|
||||
4. `runtime/` subdirectory of path to Helix executable.
|
||||
|
||||
```
|
||||
This order also sets the priority for selecting which file will be used if multiple runtime
|
||||
directories have files with the same name.
|
||||
|
||||
### Validating the installation
|
||||
|
||||
To make sure everything is set up as expected you should run the Helix health
|
||||
check:
|
||||
|
||||
```sh
|
||||
hx --health
|
||||
```
|
||||
|
||||
For more information on the information displayed in the health check results refer to [Healthcheck](https://github.com/helix-editor/helix/wiki/Healthcheck).
|
||||
For more information on the health check results refer to
|
||||
[Health check](https://github.com/helix-editor/helix/wiki/Healthcheck).
|
||||
|
||||
### Building tree-sitter grammars
|
||||
### Configure the desktop shortcut
|
||||
|
||||
Tree-sitter grammars must be fetched and compiled if not pre-packaged.
|
||||
Fetch grammars with `hx --grammar fetch` (requires `git`) and compile them
|
||||
with `hx --grammar build` (requires a C++ compiler).
|
||||
If your desktop environment supports the
|
||||
[XDG desktop menu](https://specifications.freedesktop.org/menu-spec/menu-spec-latest.html)
|
||||
you can configure Helix to show up in the application menu by copying the
|
||||
provided `.desktop` and icon files to their correct folders:
|
||||
|
||||
### Installing language servers
|
||||
```sh
|
||||
cp contrib/Helix.desktop ~/.local/share/applications
|
||||
cp contrib/helix.png ~/.icons # or ~/.local/share/icons
|
||||
```
|
||||
|
||||
To use another terminal than the system default, you can modify the `.desktop`
|
||||
file. For example, to use `kitty`:
|
||||
|
||||
Language servers can optionally be installed if you want their features (auto-complete, diagnostics etc.).
|
||||
Follow the [instructions on the wiki page](https://github.com/helix-editor/helix/wiki/How-to-install-the-default-language-servers) to add your language servers of choice.
|
||||
```sh
|
||||
sed -i "s|Exec=hx %F|Exec=kitty hx %F|g" ~/.local/share/applications/Helix.desktop
|
||||
sed -i "s|Terminal=true|Terminal=false|g" ~/.local/share/applications/Helix.desktop
|
||||
```
|
||||
|
@@ -0,0 +1,87 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<component type="desktop-application">
|
||||
<id>com.helix_editor.Helix</id>
|
||||
<metadata_license>CC0-1.0</metadata_license>
|
||||
<project_license>MPL-2.0</project_license>
|
||||
<name>Helix</name>
|
||||
<summary>A post-modern text editor</summary>
|
||||
|
||||
<description>
|
||||
<p>
|
||||
Helix is a terminal-based text editor inspired by Kakoune / Neovim and written in Rust.
|
||||
</p>
|
||||
<ul>
|
||||
<li>Vim-like modal editing</li>
|
||||
<li>Multiple selections</li>
|
||||
<li>Built-in language server support</li>
|
||||
<li>Smart, incremental syntax highlighting and code editing via tree-sitter</li>
|
||||
</ul>
|
||||
</description>
|
||||
|
||||
<launchable type="desktop-id">Helix.desktop</launchable>
|
||||
|
||||
<screenshots>
|
||||
<screenshot type="default">
|
||||
<caption>Helix with default theme</caption>
|
||||
<image>https://github.com/helix-editor/helix/raw/d4565b4404cabc522bd60822abd374755581d751/screenshot.png</image>
|
||||
</screenshot>
|
||||
</screenshots>
|
||||
|
||||
<url type="homepage">https://helix-editor.com/</url>
|
||||
<url type="donation">https://opencollective.com/helix-editor</url>
|
||||
<url type="help">https://docs.helix-editor.com/</url>
|
||||
<url type="vcs-browser">https://github.com/helix-editor/helix</url>
|
||||
<url type="bugtracker">https://github.com/helix-editor/helix/issues</url>
|
||||
|
||||
<content_rating type="oars-1.1" />
|
||||
|
||||
<releases>
|
||||
<release version="22.12" date="2022-12-6">
|
||||
<url>https://helix-editor.com/news/release-22-12-highlights/</url>
|
||||
</release>
|
||||
<release version="22.08" date="2022-8-31">
|
||||
<url>https://helix-editor.com/news/release-22-08-highlights/</url>
|
||||
</release>
|
||||
<release version="22.05" date="2022-5-28">
|
||||
<url>https://helix-editor.com/news/release-22-05-highlights/</url>
|
||||
</release>
|
||||
<release version="22.03" date="2022-3-28">
|
||||
<url>https://helix-editor.com/news/release-22-03-highlights/</url>
|
||||
</release>
|
||||
</releases>
|
||||
|
||||
<requires>
|
||||
<control>keyboard</control>
|
||||
</requires>
|
||||
|
||||
<categories>
|
||||
<category>Utility</category>
|
||||
<category>TextEditor</category>
|
||||
</categories>
|
||||
|
||||
<keywords>
|
||||
<keyword>text</keyword>
|
||||
<keyword>editor</keyword>
|
||||
<keyword>development</keyword>
|
||||
<keyword>programming</keyword>
|
||||
</keywords>
|
||||
|
||||
<provides>
|
||||
<binary>hx</binary>
|
||||
<mediatype>text/english</mediatype>
|
||||
<mediatype>text/plain</mediatype>
|
||||
<mediatype>text/x-makefile</mediatype>
|
||||
<mediatype>text/x-c++hdr</mediatype>
|
||||
<mediatype>text/x-c++src</mediatype>
|
||||
<mediatype>text/x-chdr</mediatype>
|
||||
<mediatype>text/x-csrc</mediatype>
|
||||
<mediatype>text/x-java</mediatype>
|
||||
<mediatype>text/x-moc</mediatype>
|
||||
<mediatype>text/x-pascal</mediatype>
|
||||
<mediatype>text/x-tcl</mediatype>
|
||||
<mediatype>text/x-tex</mediatype>
|
||||
<mediatype>application/x-shellscript</mediatype>
|
||||
<mediatype>text/x-c</mediatype>
|
||||
<mediatype>text/x-c++</mediatype>
|
||||
</provides>
|
||||
</component>
|
@@ -0,0 +1,384 @@
|
||||
//! The `DocumentFormatter` forms the bridge between the raw document text
|
||||
//! and onscreen positioning. It yields the text graphemes as an iterator
|
||||
//! and traverses (part) of the document text. During that traversal it
|
||||
//! handles grapheme detection, softwrapping and annotations.
|
||||
//! It yields `FormattedGrapheme`s and their corresponding visual coordinates.
|
||||
//!
|
||||
//! As both virtual text and softwrapping can insert additional lines into the document
|
||||
//! it is generally not possible to find the start of the previous visual line.
|
||||
//! Instead the `DocumentFormatter` starts at the last "checkpoint" (usually a linebreak)
|
||||
//! called a "block" and the caller must advance it as needed.
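//!
//! A minimal usage sketch (a hedged example, not part of this commit; the
//! `helix_core::doc_formatter` paths below are assumptions and the snippet is
//! adapted from the tests in `doc_formatter/test.rs`):
//!
//! ```ignore
//! use helix_core::doc_formatter::{DocumentFormatter, TextFormat};
//! use helix_core::text_annotations::TextAnnotations;
//!
//! let text_fmt = TextFormat::default();
//! let annotations = TextAnnotations::default();
//! // Start at the block (checkpoint) containing char index 0 of the text.
//! let (formatter, _block_start) = DocumentFormatter::new_at_prev_checkpoint(
//!     "hello world\n".into(),
//!     &text_fmt,
//!     &annotations,
//!     0,
//! );
//! for (grapheme, pos) in formatter {
//!     // `pos` is the visual (row, col) of `grapheme` after softwrap and annotations.
//!     println!("{} at {},{}", grapheme.grapheme, pos.row, pos.col);
//! }
//! ```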
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Debug;
|
||||
use std::mem::{replace, take};
|
||||
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
|
||||
use unicode_segmentation::{Graphemes, UnicodeSegmentation};
|
||||
|
||||
use crate::graphemes::{Grapheme, GraphemeStr};
|
||||
use crate::syntax::Highlight;
|
||||
use crate::text_annotations::TextAnnotations;
|
||||
use crate::{Position, RopeGraphemes, RopeSlice};
|
||||
|
||||
/// TODO make Highlight a u32 to reduce the size of this enum to a single word.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum GraphemeSource {
|
||||
Document {
|
||||
codepoints: u32,
|
||||
},
|
||||
/// Inline virtual text cannot be highlighted with a `Highlight` iterator
|
||||
/// because it's not part of the document. Instead the `Highlight`
|
||||
/// is emitted directly by the document formatter
|
||||
VirtualText {
|
||||
highlight: Option<Highlight>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FormattedGrapheme<'a> {
|
||||
pub grapheme: Grapheme<'a>,
|
||||
pub source: GraphemeSource,
|
||||
}
|
||||
|
||||
impl<'a> FormattedGrapheme<'a> {
|
||||
pub fn new(
|
||||
g: GraphemeStr<'a>,
|
||||
visual_x: usize,
|
||||
tab_width: u16,
|
||||
source: GraphemeSource,
|
||||
) -> FormattedGrapheme<'a> {
|
||||
FormattedGrapheme {
|
||||
grapheme: Grapheme::new(g, visual_x, tab_width),
|
||||
source,
|
||||
}
|
||||
}
|
||||
/// Returns whether this grapheme is virtual inline text
|
||||
pub fn is_virtual(&self) -> bool {
|
||||
matches!(self.source, GraphemeSource::VirtualText { .. })
|
||||
}
|
||||
|
||||
pub fn placeholder() -> Self {
|
||||
FormattedGrapheme {
|
||||
grapheme: Grapheme::Other { g: " ".into() },
|
||||
source: GraphemeSource::Document { codepoints: 0 },
|
||||
}
|
||||
}
|
||||
|
||||
pub fn doc_chars(&self) -> usize {
|
||||
match self.source {
|
||||
GraphemeSource::Document { codepoints } => codepoints as usize,
|
||||
GraphemeSource::VirtualText { .. } => 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_whitespace(&self) -> bool {
|
||||
self.grapheme.is_whitespace()
|
||||
}
|
||||
|
||||
pub fn width(&self) -> usize {
|
||||
self.grapheme.width()
|
||||
}
|
||||
|
||||
pub fn is_word_boundary(&self) -> bool {
|
||||
self.grapheme.is_word_boundary()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TextFormat {
|
||||
pub soft_wrap: bool,
|
||||
pub tab_width: u16,
|
||||
pub max_wrap: u16,
|
||||
pub max_indent_retain: u16,
|
||||
pub wrap_indicator: Box<str>,
|
||||
pub wrap_indicator_highlight: Option<Highlight>,
|
||||
pub viewport_width: u16,
|
||||
}
|
||||
|
||||
// This Default implementation is only used in tests or when softwrap is disabled
|
||||
impl Default for TextFormat {
|
||||
fn default() -> Self {
|
||||
TextFormat {
|
||||
soft_wrap: false,
|
||||
tab_width: 4,
|
||||
max_wrap: 3,
|
||||
max_indent_retain: 4,
|
||||
wrap_indicator: Box::from(" "),
|
||||
viewport_width: 17,
|
||||
wrap_indicator_highlight: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct DocumentFormatter<'t> {
|
||||
text_fmt: &'t TextFormat,
|
||||
annotations: &'t TextAnnotations,
|
||||
|
||||
/// The visual position at the end of the last yielded word boundary
|
||||
visual_pos: Position,
|
||||
graphemes: RopeGraphemes<'t>,
|
||||
/// The character pos of the `graphemes` iter used for inserting annotations
|
||||
char_pos: usize,
|
||||
/// The line pos of the `graphemes` iter used for inserting annotations
|
||||
line_pos: usize,
|
||||
exhausted: bool,
|
||||
|
||||
/// Line breaks to be reserved for virtual text
|
||||
/// at the next line break
|
||||
virtual_lines: usize,
|
||||
inline_annotation_graphemes: Option<(Graphemes<'t>, Option<Highlight>)>,
|
||||
|
||||
// softwrap specific
|
||||
/// The indentation of the current line
|
||||
/// Is set to `None` if the indentation level is not yet known
|
||||
/// because no non-whitespace graphemes have been encountered yet
|
||||
indent_level: Option<usize>,
|
||||
/// In case a long word needs to be split, a single grapheme might need to be wrapped
|
||||
/// while the rest of the word stays on the same line
|
||||
peeked_grapheme: Option<(FormattedGrapheme<'t>, usize)>,
|
||||
/// A first-in first-out (fifo) buffer for the Graphemes of any given word
|
||||
word_buf: Vec<FormattedGrapheme<'t>>,
|
||||
/// The index of the next grapheme that will be yielded from the `word_buf`
|
||||
word_i: usize,
|
||||
}
|
||||
|
||||
impl<'t> DocumentFormatter<'t> {
|
||||
/// Creates a new formatter at the last block before `char_idx`.
|
||||
/// A block is a chunk which always ends with a linebreak.
|
||||
/// This is usually just a normal line break.
|
||||
/// However, very long lines are always wrapped at constant intervals that can be cheaply calculated
|
||||
/// to avoid pathological behaviour.
|
||||
pub fn new_at_prev_checkpoint(
|
||||
text: RopeSlice<'t>,
|
||||
text_fmt: &'t TextFormat,
|
||||
annotations: &'t TextAnnotations,
|
||||
char_idx: usize,
|
||||
) -> (Self, usize) {
|
||||
// TODO divide long lines into blocks to avoid bad performance for long lines
|
||||
let block_line_idx = text.char_to_line(char_idx.min(text.len_chars()));
|
||||
let block_char_idx = text.line_to_char(block_line_idx);
|
||||
annotations.reset_pos(block_char_idx);
|
||||
(
|
||||
DocumentFormatter {
|
||||
text_fmt,
|
||||
annotations,
|
||||
visual_pos: Position { row: 0, col: 0 },
|
||||
graphemes: RopeGraphemes::new(text.slice(block_char_idx..)),
|
||||
char_pos: block_char_idx,
|
||||
exhausted: false,
|
||||
virtual_lines: 0,
|
||||
indent_level: None,
|
||||
peeked_grapheme: None,
|
||||
word_buf: Vec::with_capacity(64),
|
||||
word_i: 0,
|
||||
line_pos: block_line_idx,
|
||||
inline_annotation_graphemes: None,
|
||||
},
|
||||
block_char_idx,
|
||||
)
|
||||
}
|
||||
|
||||
fn next_inline_annotation_grapheme(&mut self) -> Option<(&'t str, Option<Highlight>)> {
|
||||
loop {
|
||||
if let Some(&mut (ref mut annotation, highlight)) =
|
||||
self.inline_annotation_graphemes.as_mut()
|
||||
{
|
||||
if let Some(grapheme) = annotation.next() {
|
||||
return Some((grapheme, highlight));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some((annotation, highlight)) =
|
||||
self.annotations.next_inline_annotation_at(self.char_pos)
|
||||
{
|
||||
self.inline_annotation_graphemes = Some((
|
||||
UnicodeSegmentation::graphemes(&*annotation.text, true),
|
||||
highlight,
|
||||
))
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn advance_grapheme(&mut self, col: usize) -> Option<FormattedGrapheme<'t>> {
|
||||
let (grapheme, source) =
|
||||
if let Some((grapheme, highlight)) = self.next_inline_annotation_grapheme() {
|
||||
(grapheme.into(), GraphemeSource::VirtualText { highlight })
|
||||
} else if let Some(grapheme) = self.graphemes.next() {
|
||||
self.virtual_lines += self.annotations.annotation_lines_at(self.char_pos);
|
||||
let codepoints = grapheme.len_chars() as u32;
|
||||
|
||||
let overlay = self.annotations.overlay_at(self.char_pos);
|
||||
let grapheme = match overlay {
|
||||
Some((overlay, _)) => overlay.grapheme.as_str().into(),
|
||||
None => Cow::from(grapheme).into(),
|
||||
};
|
||||
|
||||
self.char_pos += codepoints as usize;
|
||||
(grapheme, GraphemeSource::Document { codepoints })
|
||||
} else {
|
||||
if self.exhausted {
|
||||
return None;
|
||||
}
|
||||
self.exhausted = true;
|
||||
// EOF grapheme is required for rendering
|
||||
// and correct position computations
|
||||
return Some(FormattedGrapheme {
|
||||
grapheme: Grapheme::Other { g: " ".into() },
|
||||
source: GraphemeSource::Document { codepoints: 0 },
|
||||
});
|
||||
};
|
||||
|
||||
let grapheme = FormattedGrapheme::new(grapheme, col, self.text_fmt.tab_width, source);
|
||||
|
||||
Some(grapheme)
|
||||
}
|
||||
|
||||
/// Move a word to the next visual line
|
||||
fn wrap_word(&mut self, virtual_lines_before_word: usize) -> usize {
|
||||
// softwrap this word to the next line
|
||||
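// Carry the current line's indentation over to the wrapped line, but only if it
// does not exceed `max_indent_retain`; otherwise the wrapped line starts at column 0.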
let indent_carry_over = if let Some(indent) = self.indent_level {
|
||||
if indent as u16 <= self.text_fmt.max_indent_retain {
|
||||
indent as u16
|
||||
} else {
|
||||
0
|
||||
}
|
||||
} else {
|
||||
// ensure the indent stays 0
|
||||
self.indent_level = Some(0);
|
||||
0
|
||||
};
|
||||
|
||||
self.visual_pos.col = indent_carry_over as usize;
|
||||
self.virtual_lines -= virtual_lines_before_word;
|
||||
self.visual_pos.row += 1 + virtual_lines_before_word;
|
||||
let mut i = 0;
|
||||
let mut word_width = 0;
|
||||
let wrap_indicator = UnicodeSegmentation::graphemes(&*self.text_fmt.wrap_indicator, true)
|
||||
.map(|g| {
|
||||
i += 1;
|
||||
let grapheme = FormattedGrapheme::new(
|
||||
g.into(),
|
||||
self.visual_pos.col + word_width,
|
||||
self.text_fmt.tab_width,
|
||||
GraphemeSource::VirtualText {
|
||||
highlight: self.text_fmt.wrap_indicator_highlight,
|
||||
},
|
||||
);
|
||||
word_width += grapheme.width();
|
||||
grapheme
|
||||
});
|
||||
self.word_buf.splice(0..0, wrap_indicator);
|
||||
|
||||
for grapheme in &mut self.word_buf[i..] {
|
||||
let visual_x = self.visual_pos.col + word_width;
|
||||
grapheme
|
||||
.grapheme
|
||||
.change_position(visual_x, self.text_fmt.tab_width);
|
||||
word_width += grapheme.width();
|
||||
}
|
||||
word_width
|
||||
}
|
||||
|
||||
fn advance_to_next_word(&mut self) {
|
||||
self.word_buf.clear();
|
||||
let mut word_width = 0;
|
||||
let virtual_lines_before_word = self.virtual_lines;
|
||||
let mut virtual_lines_before_grapheme = self.virtual_lines;
|
||||
|
||||
loop {
|
||||
// softwrap word if necessary
|
||||
if word_width + self.visual_pos.col >= self.text_fmt.viewport_width as usize {
|
||||
// wrapping this word would move too much text to the next line
|
||||
// split the word at the line end instead
|
||||
if word_width > self.text_fmt.max_wrap as usize {
|
||||
// Usually we stop accumulating graphemes as soon as softwrapping becomes necessary.
|
||||
// However if the last grapheme is multiple columns wide it might extend beyond the EOL.
|
||||
// The condition below ensures that this grapheme is not cutoff and instead wrapped to the next line
|
||||
if word_width + self.visual_pos.col > self.text_fmt.viewport_width as usize {
|
||||
self.peeked_grapheme = self.word_buf.pop().map(|grapheme| {
|
||||
(grapheme, self.virtual_lines - virtual_lines_before_grapheme)
|
||||
});
|
||||
self.virtual_lines = virtual_lines_before_grapheme;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
word_width = self.wrap_word(virtual_lines_before_word);
|
||||
}
|
||||
|
||||
virtual_lines_before_grapheme = self.virtual_lines;
|
||||
|
||||
let grapheme = if let Some((grapheme, virtual_lines)) = self.peeked_grapheme.take() {
|
||||
self.virtual_lines += virtual_lines;
|
||||
grapheme
|
||||
} else if let Some(grapheme) = self.advance_grapheme(self.visual_pos.col + word_width) {
|
||||
grapheme
|
||||
} else {
|
||||
return;
|
||||
};
|
||||
|
||||
// Track indentation
|
||||
if !grapheme.is_whitespace() && self.indent_level.is_none() {
|
||||
self.indent_level = Some(self.visual_pos.col);
|
||||
} else if grapheme.grapheme == Grapheme::Newline {
|
||||
self.indent_level = None;
|
||||
}
|
||||
|
||||
let is_word_boundary = grapheme.is_word_boundary();
|
||||
word_width += grapheme.width();
|
||||
self.word_buf.push(grapheme);
|
||||
|
||||
if is_word_boundary {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// returns the document line pos of the **next** grapheme that will be yielded
|
||||
pub fn line_pos(&self) -> usize {
|
||||
self.line_pos
|
||||
}
|
||||
|
||||
/// returns the visual pos of the **next** grapheme that will be yielded
|
||||
pub fn visual_pos(&self) -> Position {
|
||||
self.visual_pos
|
||||
}
|
||||
}
|
||||
|
||||
impl<'t> Iterator for DocumentFormatter<'t> {
|
||||
type Item = (FormattedGrapheme<'t>, Position);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let grapheme = if self.text_fmt.soft_wrap {
|
||||
if self.word_i >= self.word_buf.len() {
|
||||
self.advance_to_next_word();
|
||||
self.word_i = 0;
|
||||
}
|
||||
let grapheme = replace(
|
||||
self.word_buf.get_mut(self.word_i)?,
|
||||
FormattedGrapheme::placeholder(),
|
||||
);
|
||||
self.word_i += 1;
|
||||
grapheme
|
||||
} else {
|
||||
self.advance_grapheme(self.visual_pos.col)?
|
||||
};
|
||||
|
||||
let pos = self.visual_pos;
|
||||
if grapheme.grapheme == Grapheme::Newline {
|
||||
self.visual_pos.row += 1;
|
||||
self.visual_pos.row += take(&mut self.virtual_lines);
|
||||
self.visual_pos.col = 0;
|
||||
self.line_pos += 1;
|
||||
} else {
|
||||
self.visual_pos.col += grapheme.width();
|
||||
}
|
||||
Some((grapheme, pos))
|
||||
}
|
||||
}
|
@@ -0,0 +1,182 @@
|
||||
use std::rc::Rc;
|
||||
|
||||
use crate::doc_formatter::{DocumentFormatter, TextFormat};
|
||||
use crate::text_annotations::{InlineAnnotation, Overlay, TextAnnotations};
|
||||
|
||||
impl TextFormat {
|
||||
fn new_test(softwrap: bool) -> Self {
|
||||
TextFormat {
|
||||
soft_wrap: softwrap,
|
||||
tab_width: 2,
|
||||
max_wrap: 3,
|
||||
max_indent_retain: 4,
|
||||
wrap_indicator: ".".into(),
|
||||
wrap_indicator_highlight: None,
|
||||
// use a prime number to avoid lining up too often with repeating text
|
||||
viewport_width: 17,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'t> DocumentFormatter<'t> {
|
||||
fn collect_to_str(&mut self) -> String {
|
||||
use std::fmt::Write;
|
||||
let mut res = String::new();
|
||||
let viewport_width = self.text_fmt.viewport_width;
|
||||
let mut line = 0;
|
||||
|
||||
for (grapheme, pos) in self {
|
||||
if pos.row != line {
|
||||
line += 1;
|
||||
assert_eq!(pos.row, line);
|
||||
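// Start a new output line and pad with '.' up to the first grapheme's starting
// column, so carried-over indentation is visible in the expected strings.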
write!(res, "\n{}", ".".repeat(pos.col)).unwrap();
|
||||
assert!(
|
||||
pos.col <= viewport_width as usize,
|
||||
"softwrapped failed {}<={viewport_width}",
|
||||
pos.col
|
||||
);
|
||||
}
|
||||
write!(res, "{}", grapheme.grapheme).unwrap();
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
fn softwrap_text(text: &str) -> String {
|
||||
DocumentFormatter::new_at_prev_checkpoint(
|
||||
text.into(),
|
||||
&TextFormat::new_test(true),
|
||||
&TextAnnotations::default(),
|
||||
0,
|
||||
)
|
||||
.0
|
||||
.collect_to_str()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_softwrap() {
|
||||
assert_eq!(
|
||||
softwrap_text(&"foo ".repeat(10)),
|
||||
"foo foo foo foo \n.foo foo foo foo \n.foo foo "
|
||||
);
|
||||
assert_eq!(
|
||||
softwrap_text(&"fooo ".repeat(10)),
|
||||
"fooo fooo fooo \n.fooo fooo fooo \n.fooo fooo fooo \n.fooo "
|
||||
);
|
||||
|
||||
// check that we don't wrap unnecessarily
|
||||
assert_eq!(softwrap_text("\t\txxxx1xxxx2xx\n"), " xxxx1xxxx2xx \n ");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn softwrap_indentation() {
|
||||
assert_eq!(
|
||||
softwrap_text("\t\tfoo1 foo2 foo3 foo4 foo5 foo6\n"),
|
||||
" foo1 foo2 \n.....foo3 foo4 \n.....foo5 foo6 \n "
|
||||
);
|
||||
assert_eq!(
|
||||
softwrap_text("\t\t\tfoo1 foo2 foo3 foo4 foo5 foo6\n"),
|
||||
" foo1 foo2 \n.foo3 foo4 foo5 \n.foo6 \n "
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn long_word_softwrap() {
|
||||
assert_eq!(
|
||||
softwrap_text("\t\txxxx1xxxx2xxxx3xxxx4xxxx5xxxx6xxxx7xxxx8xxxx9xxx\n"),
|
||||
" xxxx1xxxx2xxx\n.....x3xxxx4xxxx5\n.....xxxx6xxxx7xx\n.....xx8xxxx9xxx \n "
|
||||
);
|
||||
assert_eq!(
|
||||
softwrap_text("xxxxxxxx1xxxx2xxx\n"),
|
||||
"xxxxxxxx1xxxx2xxx\n. \n "
|
||||
);
|
||||
assert_eq!(
|
||||
softwrap_text("\t\txxxx1xxxx 2xxxx3xxxx4xxxx5xxxx6xxxx7xxxx8xxxx9xxx\n"),
|
||||
" xxxx1xxxx \n.....2xxxx3xxxx4x\n.....xxx5xxxx6xxx\n.....x7xxxx8xxxx9\n.....xxx \n "
|
||||
);
|
||||
assert_eq!(
|
||||
softwrap_text("\t\txxxx1xxx 2xxxx3xxxx4xxxx5xxxx6xxxx7xxxx8xxxx9xxx\n"),
|
||||
" xxxx1xxx 2xxx\n.....x3xxxx4xxxx5\n.....xxxx6xxxx7xx\n.....xx8xxxx9xxx \n "
|
||||
);
|
||||
}
|
||||
|
||||
fn overlay_text(text: &str, char_pos: usize, softwrap: bool, overlays: &[Overlay]) -> String {
|
||||
DocumentFormatter::new_at_prev_checkpoint(
|
||||
text.into(),
|
||||
&TextFormat::new_test(softwrap),
|
||||
TextAnnotations::default().add_overlay(overlays.into(), None),
|
||||
char_pos,
|
||||
)
|
||||
.0
|
||||
.collect_to_str()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn overlay() {
|
||||
assert_eq!(
|
||||
overlay_text(
|
||||
"foobar",
|
||||
0,
|
||||
false,
|
||||
&[Overlay::new(0, "X"), Overlay::new(2, "\t")],
|
||||
),
|
||||
"Xo bar "
|
||||
);
|
||||
assert_eq!(
|
||||
overlay_text(
|
||||
&"foo ".repeat(10),
|
||||
0,
|
||||
true,
|
||||
&[
|
||||
Overlay::new(2, "\t"),
|
||||
Overlay::new(5, "\t"),
|
||||
Overlay::new(16, "X"),
|
||||
]
|
||||
),
|
||||
"fo f o foo \n.foo Xoo foo foo \n.foo foo foo "
|
||||
);
|
||||
}
|
||||
|
||||
fn annotate_text(text: &str, softwrap: bool, annotations: &[InlineAnnotation]) -> String {
|
||||
DocumentFormatter::new_at_prev_checkpoint(
|
||||
text.into(),
|
||||
&TextFormat::new_test(softwrap),
|
||||
TextAnnotations::default().add_inline_annotations(annotations.into(), None),
|
||||
0,
|
||||
)
|
||||
.0
|
||||
.collect_to_str()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn annotation() {
|
||||
assert_eq!(
|
||||
annotate_text("bar", false, &[InlineAnnotation::new(0, "foo")]),
|
||||
"foobar "
|
||||
);
|
||||
assert_eq!(
|
||||
annotate_text(
|
||||
&"foo ".repeat(10),
|
||||
true,
|
||||
&[InlineAnnotation::new(0, "foo ")]
|
||||
),
|
||||
"foo foo foo foo \n.foo foo foo foo \n.foo foo foo "
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
fn annotation_and_overlay() {
|
||||
assert_eq!(
|
||||
DocumentFormatter::new_at_prev_checkpoint(
|
||||
"bbar".into(),
|
||||
&TextFormat::new_test(false),
|
||||
TextAnnotations::default()
|
||||
.add_inline_annotations(Rc::new([InlineAnnotation::new(0, "fooo")]), None)
|
||||
.add_overlay(Rc::new([Overlay::new(0, "\t")]), None),
|
||||
0,
|
||||
)
|
||||
.0
|
||||
.collect_to_str(),
|
||||
"fooo bar "
|
||||
);
|
||||
}
|
@@ -0,0 +1,235 @@
|
||||
const SEPARATOR: char = '_';
|
||||
|
||||
/// Increment an integer.
|
||||
///
|
||||
/// Supported bases:
|
||||
/// 2 with prefix 0b
|
||||
/// 8 with prefix 0o
|
||||
/// 10 with no prefix
|
||||
/// 16 with prefix 0x
|
||||
///
|
||||
/// An integer can contain `_` as a separator but may not start or end with a separator.
|
||||
/// Base 10 integers can go negative, but bases 2, 8, and 16 cannot.
|
||||
/// All addition and subtraction is saturating.
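///
/// A few examples, with values taken from the tests at the bottom of this file:
///
/// ```ignore
/// assert_eq!(increment("100", 1).unwrap(), "101");
/// assert_eq!(increment("0x0100", -1).unwrap(), "0x00ff");
/// assert_eq!(increment("999_999", 1).unwrap(), "1_000_000");
/// assert_eq!(increment("_9", 1), None); // leading/trailing separators never match
/// ```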
|
||||
pub fn increment(selected_text: &str, amount: i64) -> Option<String> {
|
||||
if selected_text.is_empty()
|
||||
|| selected_text.ends_with(SEPARATOR)
|
||||
|| selected_text.starts_with(SEPARATOR)
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
let radix = if selected_text.starts_with("0x") {
|
||||
16
|
||||
} else if selected_text.starts_with("0o") {
|
||||
8
|
||||
} else if selected_text.starts_with("0b") {
|
||||
2
|
||||
} else {
|
||||
10
|
||||
};
|
||||
|
||||
// Get separator indexes from right to left.
|
||||
let separator_rtl_indexes: Vec<usize> = selected_text
|
||||
.chars()
|
||||
.rev()
|
||||
.enumerate()
|
||||
.filter_map(|(i, c)| if c == SEPARATOR { Some(i) } else { None })
|
||||
.collect();
|
||||
|
||||
let word: String = selected_text.chars().filter(|&c| c != SEPARATOR).collect();
|
||||
|
||||
let mut new_text = if radix == 10 {
|
||||
let number = &word;
|
||||
let value = i128::from_str_radix(number, radix).ok()?;
|
||||
let new_value = value.saturating_add(amount as i128);
|
||||
|
||||
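// Width for zero-padding the result: adjust by one when a minus sign appears or
// disappears, and subtract the separators, which are re-inserted further below.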
let format_length = match (value.is_negative(), new_value.is_negative()) {
|
||||
(true, false) => number.len() - 1,
|
||||
(false, true) => number.len() + 1,
|
||||
_ => number.len(),
|
||||
} - separator_rtl_indexes.len();
|
||||
|
||||
if number.starts_with('0') || number.starts_with("-0") {
|
||||
format!("{:01$}", new_value, format_length)
|
||||
} else {
|
||||
format!("{}", new_value)
|
||||
}
|
||||
} else {
|
||||
let number = &word[2..];
|
||||
let value = u128::from_str_radix(number, radix).ok()?;
|
||||
let new_value = (value as i128).saturating_add(amount as i128);
|
||||
let new_value = if new_value < 0 { 0 } else { new_value };
|
||||
let format_length = selected_text.len() - 2 - separator_rtl_indexes.len();
|
||||
|
||||
match radix {
|
||||
2 => format!("0b{:01$b}", new_value, format_length),
|
||||
8 => format!("0o{:01$o}", new_value, format_length),
|
||||
16 => {
|
||||
let (lower_count, upper_count): (usize, usize) =
|
||||
number.chars().fold((0, 0), |(lower, upper), c| {
|
||||
(
|
||||
lower + c.is_ascii_lowercase() as usize,
|
||||
upper + c.is_ascii_uppercase() as usize,
|
||||
)
|
||||
});
|
||||
if upper_count > lower_count {
|
||||
format!("0x{:01$X}", new_value, format_length)
|
||||
} else {
|
||||
format!("0x{:01$x}", new_value, format_length)
|
||||
}
|
||||
}
|
||||
_ => unimplemented!("radix not supported: {}", radix),
|
||||
}
|
||||
};
|
||||
|
||||
// Add separators from original number.
|
||||
for &rtl_index in &separator_rtl_indexes {
|
||||
if rtl_index < new_text.len() {
|
||||
let new_index = new_text.len().saturating_sub(rtl_index);
|
||||
if new_index > 0 {
|
||||
new_text.insert(new_index, SEPARATOR);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add in additional separators if necessary.
|
||||
if new_text.len() > selected_text.len() && !separator_rtl_indexes.is_empty() {
|
||||
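// Infer the grouping width from the two leftmost separators (with a single
// separator, use its distance from the right end of the original text).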
let spacing = match separator_rtl_indexes.as_slice() {
|
||||
[.., b, a] => a - b - 1,
|
||||
_ => separator_rtl_indexes[0],
|
||||
};
|
||||
|
||||
let prefix_length = if radix == 10 { 0 } else { 2 };
|
||||
if let Some(mut index) = new_text.find(SEPARATOR) {
|
||||
while index - prefix_length > spacing {
|
||||
index -= spacing;
|
||||
new_text.insert(index, SEPARATOR);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(new_text)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_decimal_numbers() {
|
||||
let tests = [
|
||||
("100", 1, "101"),
|
||||
("100", -1, "99"),
|
||||
("99", 1, "100"),
|
||||
("100", 1000, "1100"),
|
||||
("100", -1000, "-900"),
|
||||
("-1", 1, "0"),
|
||||
("-1", 2, "1"),
|
||||
("1", -1, "0"),
|
||||
("1", -2, "-1"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
assert_eq!(increment(original, amount).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_hexadecimal_numbers() {
|
||||
let tests = [
|
||||
("0x0100", 1, "0x0101"),
|
||||
("0x0100", -1, "0x00ff"),
|
||||
("0x0001", -1, "0x0000"),
|
||||
("0x0000", -1, "0x0000"),
|
||||
("0xffffffffffffffff", 1, "0x10000000000000000"),
|
||||
("0xffffffffffffffff", 2, "0x10000000000000001"),
|
||||
("0xffffffffffffffff", -1, "0xfffffffffffffffe"),
|
||||
("0xABCDEF1234567890", 1, "0xABCDEF1234567891"),
|
||||
("0xabcdef1234567890", 1, "0xabcdef1234567891"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
assert_eq!(increment(original, amount).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_octal_numbers() {
|
||||
let tests = [
|
||||
("0o0107", 1, "0o0110"),
|
||||
("0o0110", -1, "0o0107"),
|
||||
("0o0001", -1, "0o0000"),
|
||||
("0o7777", 1, "0o10000"),
|
||||
("0o1000", -1, "0o0777"),
|
||||
("0o0107", 10, "0o0121"),
|
||||
("0o0000", -1, "0o0000"),
|
||||
("0o1777777777777777777777", 1, "0o2000000000000000000000"),
|
||||
("0o1777777777777777777777", 2, "0o2000000000000000000001"),
|
||||
("0o1777777777777777777777", -1, "0o1777777777777777777776"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
assert_eq!(increment(original, amount).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_binary_numbers() {
|
||||
let tests = [
|
||||
("0b00000100", 1, "0b00000101"),
|
||||
("0b00000100", -1, "0b00000011"),
|
||||
("0b00000100", 2, "0b00000110"),
|
||||
("0b00000100", -2, "0b00000010"),
|
||||
("0b00000001", -1, "0b00000000"),
|
||||
("0b00111111", 10, "0b01001001"),
|
||||
("0b11111111", 1, "0b100000000"),
|
||||
("0b10000000", -1, "0b01111111"),
|
||||
("0b0000", -1, "0b0000"),
|
||||
(
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
1,
|
||||
"0b10000000000000000000000000000000000000000000000000000000000000000",
|
||||
),
|
||||
(
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
2,
|
||||
"0b10000000000000000000000000000000000000000000000000000000000000001",
|
||||
),
|
||||
(
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
-1,
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111110",
|
||||
),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
assert_eq!(increment(original, amount).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_with_separators() {
|
||||
let tests = [
|
||||
("999_999", 1, "1_000_000"),
|
||||
("1_000_000", -1, "999_999"),
|
||||
("-999_999", -1, "-1_000_000"),
|
||||
("0x0000_0000_0001", 0x1_ffff_0000, "0x0001_ffff_0001"),
|
||||
("0x0000_0000", -1, "0x0000_0000"),
|
||||
("0x0000_0000_0000", -1, "0x0000_0000_0000"),
|
||||
("0b01111111_11111111", 1, "0b10000000_00000000"),
|
||||
("0b11111111_11111111", 1, "0b1_00000000_00000000"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
assert_eq!(increment(original, amount).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_leading_and_trailing_separators_arent_a_match() {
|
||||
assert_eq!(increment("9_", 1), None);
|
||||
assert_eq!(increment("_9", 1), None);
|
||||
assert_eq!(increment("_9_", 1), None);
|
||||
}
|
||||
}
|
@@ -1,8 +1,10 @@
|
||||
pub mod date_time;
|
||||
pub mod number;
|
||||
mod date_time;
|
||||
mod integer;
|
||||
|
||||
use crate::{Range, Tendril};
|
||||
pub fn integer(selected_text: &str, amount: i64) -> Option<String> {
|
||||
integer::increment(selected_text, amount)
|
||||
}
|
||||
|
||||
pub trait Increment {
|
||||
fn increment(&self, amount: i64) -> (Range, Tendril);
|
||||
pub fn date_time(selected_text: &str, amount: i64) -> Option<String> {
|
||||
date_time::increment(selected_text, amount)
|
||||
}
|
||||
|
@@ -1,507 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
|
||||
use ropey::RopeSlice;
|
||||
|
||||
use super::Increment;
|
||||
|
||||
use crate::{
|
||||
textobject::{textobject_word, TextObject},
|
||||
Range, Tendril,
|
||||
};
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct NumberIncrementor<'a> {
|
||||
value: i64,
|
||||
radix: u32,
|
||||
range: Range,
|
||||
|
||||
text: RopeSlice<'a>,
|
||||
}
|
||||
|
||||
impl<'a> NumberIncrementor<'a> {
|
||||
/// Return information about number under rang if there is one.
|
||||
pub fn from_range(text: RopeSlice, range: Range) -> Option<NumberIncrementor> {
|
||||
// If the cursor is on the minus sign of a number we want to get the word textobject to the
|
||||
// right of it.
|
||||
let range = if range.to() < text.len_chars()
|
||||
&& range.to() - range.from() <= 1
|
||||
&& text.char(range.from()) == '-'
|
||||
{
|
||||
Range::new(range.from() + 1, range.to() + 1)
|
||||
} else {
|
||||
range
|
||||
};
|
||||
|
||||
let range = textobject_word(text, range, TextObject::Inside, 1, false);
|
||||
|
||||
// If there is a minus sign to the left of the word object, we want to include it in the range.
|
||||
let range = if range.from() > 0 && text.char(range.from() - 1) == '-' {
|
||||
range.extend(range.from() - 1, range.from())
|
||||
} else {
|
||||
range
|
||||
};
|
||||
|
||||
let word: String = text
|
||||
.slice(range.from()..range.to())
|
||||
.chars()
|
||||
.filter(|&c| c != '_')
|
||||
.collect();
|
||||
let (radix, prefixed) = if word.starts_with("0x") {
|
||||
(16, true)
|
||||
} else if word.starts_with("0o") {
|
||||
(8, true)
|
||||
} else if word.starts_with("0b") {
|
||||
(2, true)
|
||||
} else {
|
||||
(10, false)
|
||||
};
|
||||
|
||||
let number = if prefixed { &word[2..] } else { &word };
|
||||
|
||||
let value = i128::from_str_radix(number, radix).ok()?;
|
||||
if (value.is_positive() && value.leading_zeros() < 64)
|
||||
|| (value.is_negative() && value.leading_ones() < 64)
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
let value = value as i64;
|
||||
Some(NumberIncrementor {
|
||||
range,
|
||||
value,
|
||||
radix,
|
||||
text,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Increment for NumberIncrementor<'a> {
|
||||
fn increment(&self, amount: i64) -> (Range, Tendril) {
|
||||
let old_text: Cow<str> = self.text.slice(self.range.from()..self.range.to()).into();
|
||||
let old_length = old_text.len();
|
||||
let new_value = self.value.wrapping_add(amount);
|
||||
|
||||
// Get separator indexes from right to left.
|
||||
let separator_rtl_indexes: Vec<usize> = old_text
|
||||
.chars()
|
||||
.rev()
|
||||
.enumerate()
|
||||
.filter_map(|(i, c)| if c == '_' { Some(i) } else { None })
|
||||
.collect();
|
||||
|
||||
let format_length = if self.radix == 10 {
|
||||
match (self.value.is_negative(), new_value.is_negative()) {
|
||||
(true, false) => old_length - 1,
|
||||
(false, true) => old_length + 1,
|
||||
_ => old_text.len(),
|
||||
}
|
||||
} else {
|
||||
old_text.len() - 2
|
||||
} - separator_rtl_indexes.len();
|
||||
|
||||
let mut new_text = match self.radix {
|
||||
2 => format!("0b{:01$b}", new_value, format_length),
|
||||
8 => format!("0o{:01$o}", new_value, format_length),
|
||||
10 if old_text.starts_with('0') || old_text.starts_with("-0") => {
|
||||
format!("{:01$}", new_value, format_length)
|
||||
}
|
||||
10 => format!("{}", new_value),
|
||||
16 => {
|
||||
let (lower_count, upper_count): (usize, usize) =
|
||||
old_text.chars().skip(2).fold((0, 0), |(lower, upper), c| {
|
||||
(
|
||||
lower + usize::from(c.is_ascii_lowercase()),
|
||||
upper + usize::from(c.is_ascii_uppercase()),
|
||||
)
|
||||
});
|
||||
if upper_count > lower_count {
|
||||
format!("0x{:01$X}", new_value, format_length)
|
||||
} else {
|
||||
format!("0x{:01$x}", new_value, format_length)
|
||||
}
|
||||
}
|
||||
_ => unimplemented!("radix not supported: {}", self.radix),
|
||||
};
|
||||
|
||||
// Add separators from original number.
|
||||
for &rtl_index in &separator_rtl_indexes {
|
||||
if rtl_index < new_text.len() {
|
||||
let new_index = new_text.len() - rtl_index;
|
||||
new_text.insert(new_index, '_');
|
||||
}
|
||||
}
|
||||
|
||||
// Add in additional separators if necessary.
|
||||
if new_text.len() > old_length && !separator_rtl_indexes.is_empty() {
|
||||
let spacing = match separator_rtl_indexes.as_slice() {
|
||||
[.., b, a] => a - b - 1,
|
||||
_ => separator_rtl_indexes[0],
|
||||
};
|
||||
|
||||
let prefix_length = if self.radix == 10 { 0 } else { 2 };
|
||||
if let Some(mut index) = new_text.find('_') {
|
||||
while index - prefix_length > spacing {
|
||||
index -= spacing;
|
||||
new_text.insert(index, '_');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
(self.range, new_text.into())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::Rope;
|
||||
|
||||
#[test]
|
||||
fn test_decimal_at_point() {
|
||||
let rope = Rope::from_str("Test text 12345 more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 15),
|
||||
value: 12345,
|
||||
radix: 10,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_uppercase_hexadecimal_at_point() {
|
||||
let rope = Rope::from_str("Test text 0x123ABCDEF more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 21),
|
||||
value: 0x123ABCDEF,
|
||||
radix: 16,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lowercase_hexadecimal_at_point() {
|
||||
let rope = Rope::from_str("Test text 0xfa3b4e more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 18),
|
||||
value: 0xfa3b4e,
|
||||
radix: 16,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_octal_at_point() {
|
||||
let rope = Rope::from_str("Test text 0o1074312 more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 19),
|
||||
value: 0o1074312,
|
||||
radix: 8,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_at_point() {
|
||||
let rope = Rope::from_str("Test text 0b10111010010101 more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 26),
|
||||
value: 0b10111010010101,
|
||||
radix: 2,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_negative_decimal_at_point() {
|
||||
let rope = Rope::from_str("Test text -54321 more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 16),
|
||||
value: -54321,
|
||||
radix: 10,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_decimal_with_leading_zeroes_at_point() {
|
||||
let rope = Rope::from_str("Test text 000045326 more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 19),
|
||||
value: 45326,
|
||||
radix: 10,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_negative_decimal_cursor_on_minus_sign() {
|
||||
let rope = Rope::from_str("Test text -54321 more text.");
|
||||
let range = Range::point(10);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(10, 16),
|
||||
value: -54321,
|
||||
radix: 10,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_number_under_range_start_of_rope() {
|
||||
let rope = Rope::from_str("100");
|
||||
let range = Range::point(0);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(0, 3),
|
||||
value: 100,
|
||||
radix: 10,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_number_under_range_end_of_rope() {
|
||||
let rope = Rope::from_str("100");
|
||||
let range = Range::point(2);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(0, 3),
|
||||
value: 100,
|
||||
radix: 10,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_number_surrounded_by_punctuation() {
|
||||
let rope = Rope::from_str(",100;");
|
||||
let range = Range::point(1);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range),
|
||||
Some(NumberIncrementor {
|
||||
range: Range::new(1, 4),
|
||||
value: 100,
|
||||
radix: 10,
|
||||
text: rope.slice(..),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_not_a_number_point() {
|
||||
let rope = Rope::from_str("Test text 45326 more text.");
|
||||
let range = Range::point(6);
|
||||
assert_eq!(NumberIncrementor::from_range(rope.slice(..), range), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_number_too_large_at_point() {
|
||||
let rope = Rope::from_str("Test text 0xFFFFFFFFFFFFFFFFF more text.");
|
||||
let range = Range::point(12);
|
||||
assert_eq!(NumberIncrementor::from_range(rope.slice(..), range), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_number_cursor_one_right_of_number() {
|
||||
let rope = Rope::from_str("100 ");
|
||||
let range = Range::point(3);
|
||||
assert_eq!(NumberIncrementor::from_range(rope.slice(..), range), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_number_cursor_one_left_of_number() {
|
||||
let rope = Rope::from_str(" 100");
|
||||
let range = Range::point(0);
|
||||
assert_eq!(NumberIncrementor::from_range(rope.slice(..), range), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_decimal_numbers() {
|
||||
let tests = [
|
||||
("100", 1, "101"),
|
||||
("100", -1, "99"),
|
||||
("99", 1, "100"),
|
||||
("100", 1000, "1100"),
|
||||
("100", -1000, "-900"),
|
||||
("-1", 1, "0"),
|
||||
("-1", 2, "1"),
|
||||
("1", -1, "0"),
|
||||
("1", -2, "-1"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
let rope = Rope::from_str(original);
|
||||
let range = Range::point(0);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range)
|
||||
.unwrap()
|
||||
.increment(amount)
|
||||
.1,
|
||||
Tendril::from(expected)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_hexadecimal_numbers() {
|
||||
let tests = [
|
||||
("0x0100", 1, "0x0101"),
|
||||
("0x0100", -1, "0x00ff"),
|
||||
("0x0001", -1, "0x0000"),
|
||||
("0x0000", -1, "0xffffffffffffffff"),
|
||||
("0xffffffffffffffff", 1, "0x0000000000000000"),
|
||||
("0xffffffffffffffff", 2, "0x0000000000000001"),
|
||||
("0xffffffffffffffff", -1, "0xfffffffffffffffe"),
|
||||
("0xABCDEF1234567890", 1, "0xABCDEF1234567891"),
|
||||
("0xabcdef1234567890", 1, "0xabcdef1234567891"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
let rope = Rope::from_str(original);
|
||||
let range = Range::point(0);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range)
|
||||
.unwrap()
|
||||
.increment(amount)
|
||||
.1,
|
||||
Tendril::from(expected)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_octal_numbers() {
|
||||
let tests = [
|
||||
("0o0107", 1, "0o0110"),
|
||||
("0o0110", -1, "0o0107"),
|
||||
("0o0001", -1, "0o0000"),
|
||||
("0o7777", 1, "0o10000"),
|
||||
("0o1000", -1, "0o0777"),
|
||||
("0o0107", 10, "0o0121"),
|
||||
("0o0000", -1, "0o1777777777777777777777"),
|
||||
("0o1777777777777777777777", 1, "0o0000000000000000000000"),
|
||||
("0o1777777777777777777777", 2, "0o0000000000000000000001"),
|
||||
("0o1777777777777777777777", -1, "0o1777777777777777777776"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
let rope = Rope::from_str(original);
|
||||
let range = Range::point(0);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range)
|
||||
.unwrap()
|
||||
.increment(amount)
|
||||
.1,
|
||||
Tendril::from(expected)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_basic_binary_numbers() {
|
||||
let tests = [
|
||||
("0b00000100", 1, "0b00000101"),
|
||||
("0b00000100", -1, "0b00000011"),
|
||||
("0b00000100", 2, "0b00000110"),
|
||||
("0b00000100", -2, "0b00000010"),
|
||||
("0b00000001", -1, "0b00000000"),
|
||||
("0b00111111", 10, "0b01001001"),
|
||||
("0b11111111", 1, "0b100000000"),
|
||||
("0b10000000", -1, "0b01111111"),
|
||||
(
|
||||
"0b0000",
|
||||
-1,
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
),
|
||||
(
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
1,
|
||||
"0b0000000000000000000000000000000000000000000000000000000000000000",
|
||||
),
|
||||
(
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
2,
|
||||
"0b0000000000000000000000000000000000000000000000000000000000000001",
|
||||
),
|
||||
(
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
-1,
|
||||
"0b1111111111111111111111111111111111111111111111111111111111111110",
|
||||
),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
let rope = Rope::from_str(original);
|
||||
let range = Range::point(0);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range)
|
||||
.unwrap()
|
||||
.increment(amount)
|
||||
.1,
|
||||
Tendril::from(expected)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_increment_with_separators() {
|
||||
let tests = [
|
||||
("999_999", 1, "1_000_000"),
|
||||
("1_000_000", -1, "999_999"),
|
||||
("-999_999", -1, "-1_000_000"),
|
||||
("0x0000_0000_0001", 0x1_ffff_0000, "0x0001_ffff_0001"),
|
||||
("0x0000_0000_0001", 0x1_ffff_0000, "0x0001_ffff_0001"),
|
||||
("0x0000_0000_0001", 0x1_ffff_0000, "0x0001_ffff_0001"),
|
||||
("0x0000_0000", -1, "0xffff_ffff_ffff_ffff"),
|
||||
("0x0000_0000_0000", -1, "0xffff_ffff_ffff_ffff"),
|
||||
("0b01111111_11111111", 1, "0b10000000_00000000"),
|
||||
("0b11111111_11111111", 1, "0b1_00000000_00000000"),
|
||||
];
|
||||
|
||||
for (original, amount, expected) in tests {
|
||||
let rope = Rope::from_str(original);
|
||||
let range = Range::point(0);
|
||||
assert_eq!(
|
||||
NumberIncrementor::from_range(rope.slice(..), range)
|
||||
.unwrap()
|
||||
.increment(amount)
|
||||
.1,
|
||||
Tendril::from(expected)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,274 @@
|
||||
use std::cell::Cell;
|
||||
use std::convert::identity;
|
||||
use std::ops::Range;
|
||||
use std::rc::Rc;
|
||||
|
||||
use crate::syntax::Highlight;
|
||||
use crate::Tendril;
|
||||
|
||||
/// An inline annotation is continuous text shown
|
||||
/// on the screen before the grapheme that starts at
|
||||
/// `char_idx`
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct InlineAnnotation {
|
||||
pub text: Tendril,
|
||||
pub char_idx: usize,
|
||||
}
|
||||
|
||||
impl InlineAnnotation {
|
||||
pub fn new(char_idx: usize, text: impl Into<Tendril>) -> Self {
|
||||
Self {
|
||||
char_idx,
|
||||
text: text.into(),
|
||||
}
|
||||
}
|
||||
}
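Since the doc comment above describes inline annotations only in prose, here is a minimal sketch of constructing one; the char index and the inlay-hint text are invented purely for illustration:

```rust
use helix_core::text_annotations::InlineAnnotation;

// Show an inlay-hint style annotation immediately before the grapheme at char 7.
let hint = InlineAnnotation::new(7, ": i32");
assert_eq!(hint.char_idx, 7);
```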
|
||||
|
||||
/// Represents a **single Grapheme** that is part of the document
|
||||
/// that starts at `char_idx` and will be replaced with
|
||||
/// a different `grapheme`.
|
||||
/// If `grapheme` contains multiple graphemes the text
|
||||
/// will render incorrectly.
|
||||
/// If you want to overlay multiple graphemes simply
|
||||
/// use multiple `Overlays`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// The following examples are valid overlays for the following text:
|
||||
///
|
||||
/// `aX͎̊͢͜͝͡bc`
|
||||
///
|
||||
/// ```
|
||||
/// use helix_core::text_annotations::Overlay;
|
||||
///
|
||||
/// // replaces a
|
||||
/// Overlay::new(0, "X");
|
||||
///
|
||||
/// // replaces X͎̊͢͜͝͡
|
||||
/// Overlay::new(1, "\t");
|
||||
///
|
||||
/// // replaces b
|
||||
/// Overlay::new(6, "X̢̢̟͖̲͌̋̇͑͝");
|
||||
/// ```
|
||||
///
|
||||
/// The following examples are invalid uses
|
||||
///
|
||||
/// ```
|
||||
/// use helix_core::text_annotations::Overlay;
|
||||
///
|
||||
/// // overlay is not aligned at grapheme boundary
|
||||
/// Overlay::new(3, "x");
|
||||
///
|
||||
/// // overlay contains multiple graphemes
|
||||
/// Overlay::new(0, "xy");
|
||||
/// ```
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Overlay {
|
||||
pub char_idx: usize,
|
||||
pub grapheme: Tendril,
|
||||
}
|
||||
|
||||
impl Overlay {
|
||||
pub fn new(char_idx: usize, grapheme: impl Into<Tendril>) -> Self {
|
||||
Self {
|
||||
char_idx,
|
||||
grapheme: grapheme.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Line annotations allow for virtual text between normal
|
||||
/// text lines. They cause `height` empty lines to be inserted
|
||||
/// below the document line that contains `anchor_char_idx`.
|
||||
///
|
||||
/// These lines can be filled with text in the rendering code
|
||||
/// as their contents have no effect beyond visual appearance.
|
||||
///
|
||||
/// To insert a line after a document line simply set
|
||||
/// `anchor_char_idx` to `doc.line_to_char(line_idx)`
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LineAnnotation {
|
||||
pub anchor_char_idx: usize,
|
||||
pub height: usize,
|
||||
}
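As the doc comment notes, a line annotation is anchored by setting `anchor_char_idx` to `doc.line_to_char(line_idx)`. A small sketch of that, assuming the document text is a `Rope` as elsewhere in `helix-core`:

```rust
use helix_core::{text_annotations::LineAnnotation, Rope};

let doc = Rope::from_str("fn main() {\n    todo!()\n}\n");
// Reserve two virtual lines directly below document line 1,
// e.g. for inline diagnostics rendered by the UI layer.
let annotation = LineAnnotation {
    anchor_char_idx: doc.line_to_char(1),
    height: 2,
};
assert_eq!(annotation.anchor_char_idx, 12);
```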
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Layer<A, M> {
|
||||
annotations: Rc<[A]>,
|
||||
current_index: Cell<usize>,
|
||||
metadata: M,
|
||||
}
|
||||
|
||||
impl<A, M: Clone> Clone for Layer<A, M> {
|
||||
fn clone(&self) -> Self {
|
||||
Layer {
|
||||
annotations: self.annotations.clone(),
|
||||
current_index: self.current_index.clone(),
|
||||
metadata: self.metadata.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, M> Layer<A, M> {
|
||||
pub fn reset_pos(&self, char_idx: usize, get_char_idx: impl Fn(&A) -> usize) {
|
||||
let new_index = self
|
||||
.annotations
|
||||
.binary_search_by_key(&char_idx, get_char_idx)
|
||||
.unwrap_or_else(identity);
|
||||
|
||||
self.current_index.set(new_index);
|
||||
}
|
||||
|
||||
pub fn consume(&self, char_idx: usize, get_char_idx: impl Fn(&A) -> usize) -> Option<&A> {
|
||||
let annot = self.annotations.get(self.current_index.get())?;
|
||||
debug_assert!(get_char_idx(annot) >= char_idx);
|
||||
if get_char_idx(annot) == char_idx {
|
||||
self.current_index.set(self.current_index.get() + 1);
|
||||
Some(annot)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, M> From<(Rc<[A]>, M)> for Layer<A, M> {
|
||||
fn from((annotations, metadata): (Rc<[A]>, M)) -> Layer<A, M> {
|
||||
Layer {
|
||||
annotations,
|
||||
current_index: Cell::new(0),
|
||||
metadata,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn reset_pos<A, M>(layers: &[Layer<A, M>], pos: usize, get_pos: impl Fn(&A) -> usize) {
|
||||
for layer in layers {
|
||||
layer.reset_pos(pos, &get_pos)
|
||||
}
|
||||
}
|
||||
|
||||
/// Annotations that change how the document is displayed when it is rendered.
|
||||
/// Also commonly called virtual text.
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct TextAnnotations {
|
||||
inline_annotations: Vec<Layer<InlineAnnotation, Option<Highlight>>>,
|
||||
overlays: Vec<Layer<Overlay, Option<Highlight>>>,
|
||||
line_annotations: Vec<Layer<LineAnnotation, ()>>,
|
||||
}
|
||||
|
||||
impl TextAnnotations {
|
||||
/// Prepare the TextAnnotations for iteration starting at char_idx
|
||||
pub fn reset_pos(&self, char_idx: usize) {
|
||||
reset_pos(&self.inline_annotations, char_idx, |annot| annot.char_idx);
|
||||
reset_pos(&self.overlays, char_idx, |annot| annot.char_idx);
|
||||
reset_pos(&self.line_annotations, char_idx, |annot| {
|
||||
annot.anchor_char_idx
|
||||
});
|
||||
}
|
||||
|
||||
pub fn collect_overlay_highlights(
|
||||
&self,
|
||||
char_range: Range<usize>,
|
||||
) -> Vec<(usize, Range<usize>)> {
|
||||
let mut highlights = Vec::new();
|
||||
self.reset_pos(char_range.start);
|
||||
for char_idx in char_range {
|
||||
if let Some((_, Some(highlight))) = self.overlay_at(char_idx) {
|
||||
// we don't know the number of chars the original grapheme takes
|
||||
// however it doesn't matter as highlight boundaries are automatically
|
||||
// aligned to grapheme boundaries in the rendering code
|
||||
highlights.push((highlight.0, char_idx..char_idx + 1))
|
||||
}
|
||||
}
|
||||
|
||||
highlights
|
||||
}
|
||||
|
||||
/// Add new inline annotations.
|
||||
///
|
||||
/// The annotation's text will be rendered with `highlight`
|
||||
/// patched on top of `ui.text`.
|
||||
///
|
||||
/// The annotations **must be sorted** by their `char_idx`.
|
||||
/// Multiple annotations with the same `char_idx` are allowed,
|
||||
/// they will be displayed in the order that they are present in the layer.
|
||||
///
|
||||
/// If multiple layers contain annotations at the same position
|
||||
/// the annotations that belong to the layers added first will be shown first.
|
||||
pub fn add_inline_annotations(
|
||||
&mut self,
|
||||
layer: Rc<[InlineAnnotation]>,
|
||||
highlight: Option<Highlight>,
|
||||
) -> &mut Self {
|
||||
self.inline_annotations.push((layer, highlight).into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Add new grapheme overlays.
|
||||
///
|
||||
/// The overlaid grapheme will be rendered with `highlight`
|
||||
/// patched on top of `ui.text`.
|
||||
///
|
||||
/// The overlays **must be sorted** by their `char_idx`.
|
||||
/// Multiple overlays with the same `char_idx` **are allowed**.
|
||||
///
|
||||
/// If multiple layers contain an overlay at the same position,
|
||||
/// the overlay from the layer added last will be shown.
|
||||
pub fn add_overlay(&mut self, layer: Rc<[Overlay]>, highlight: Option<Highlight>) -> &mut Self {
|
||||
self.overlays.push((layer, highlight).into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Add new annotation lines.
|
||||
///
|
||||
/// The line annotations **must be sorted** by their `anchor_char_idx`.
|
||||
/// Multiple line annotations with the same `anchor_char_idx` **are not allowed**.
|
||||
pub fn add_line_annotation(&mut self, layer: Rc<[LineAnnotation]>) -> &mut Self {
|
||||
self.line_annotations.push((layer, ()).into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Removes all line annotations, useful for vertical motions
|
||||
/// so that virtual text lines are automatically skipped.
|
||||
pub fn clear_line_annotations(&mut self) {
|
||||
self.line_annotations.clear();
|
||||
}
|
||||
|
||||
pub(crate) fn next_inline_annotation_at(
|
||||
&self,
|
||||
char_idx: usize,
|
||||
) -> Option<(&InlineAnnotation, Option<Highlight>)> {
|
||||
self.inline_annotations.iter().find_map(|layer| {
|
||||
let annotation = layer.consume(char_idx, |annot| annot.char_idx)?;
|
||||
Some((annotation, layer.metadata))
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn overlay_at(&self, char_idx: usize) -> Option<(&Overlay, Option<Highlight>)> {
|
||||
let mut overlay = None;
|
||||
for layer in &self.overlays {
|
||||
while let Some(new_overlay) = layer.consume(char_idx, |annot| annot.char_idx) {
|
||||
overlay = Some((new_overlay, layer.metadata));
|
||||
}
|
||||
}
|
||||
overlay
|
||||
}
|
||||
|
||||
pub(crate) fn annotation_lines_at(&self, char_idx: usize) -> usize {
|
||||
self.line_annotations
|
||||
.iter()
|
||||
.map(|layer| {
|
||||
let mut lines = 0;
|
||||
while let Some(annot) = layer.annotations.get(layer.current_index.get()) {
|
||||
if annot.anchor_char_idx == char_idx {
|
||||
layer.current_index.set(layer.current_index.get() + 1);
|
||||
lines += annot.height
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
lines
|
||||
})
|
||||
.sum()
|
||||
}
|
||||
}
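Taken together, the `add_*` methods above assemble the layers that the rendering code later consumes through `reset_pos`, `next_inline_annotation_at`, `overlay_at`, and `annotation_lines_at`. A minimal usage sketch, assuming only the public API shown in this file; the concrete indices and the `Highlight(5)` theme index are invented for illustration:

```rust
use std::rc::Rc;

use helix_core::syntax::Highlight;
use helix_core::text_annotations::{InlineAnnotation, LineAnnotation, Overlay, TextAnnotations};

// Each layer is a shared slice, sorted by char index as the doc comments require.
let hints: Rc<[InlineAnnotation]> = vec![
    InlineAnnotation::new(3, ": usize"),
    InlineAnnotation::new(10, ": bool"),
]
.into();
let overlays: Rc<[Overlay]> = vec![Overlay::new(0, "•")].into();
let virtual_lines: Rc<[LineAnnotation]> = vec![LineAnnotation {
    anchor_char_idx: 0,
    height: 1,
}]
.into();

let mut annotations = TextAnnotations::default();
annotations
    // `Highlight(5)` stands in for whatever theme scope index the caller resolved.
    .add_inline_annotations(hints, Some(Highlight(5)))
    .add_overlay(overlays, None)
    .add_line_annotation(virtual_lines);

// Rewind the layers before iterating from the start of the viewport.
annotations.reset_pos(0);
```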
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,13 @@
|
||||
[package]
|
||||
name = "helix-parsec"
|
||||
version = "0.6.0"
|
||||
authors = ["Blaž Hrastnik <blaz@mxxn.io>"]
|
||||
edition = "2021"
|
||||
license = "MPL-2.0"
|
||||
description = "Parser combinators for Helix"
|
||||
categories = ["editor"]
|
||||
repository = "https://github.com/helix-editor/helix"
|
||||
homepage = "https://helix-editor.com"
|
||||
include = ["src/**/*", "README.md"]
|
||||
|
||||
[dependencies]
|
@ -0,0 +1,574 @@
|
||||
//! Parser-combinator functions
|
||||
//!
|
||||
//! This module provides parsers and parser combinators which can be used
|
||||
//! together to build parsers by functional composition.
|
||||
|
||||
// This module implements parser combinators following https://bodil.lol/parser-combinators/.
|
||||
// `sym` (trait implementation for `&'static str`), `map`, `pred` (filter), `one_or_more`,
|
||||
// `zero_or_more`, as well as the `Parser` trait originate mostly from that post.
|
||||
// The remaining parsers and parser combinators are either based on
|
||||
// https://github.com/archseer/snippets.nvim/blob/a583da6ef130d2a4888510afd8c4e5ffd62d0dce/lua/snippet/parser.lua#L5-L138
|
||||
// or are novel.
|
||||
|
||||
// When a parser matches the input successfully, it returns `Ok((next_input, some_value))`
|
||||
// where the type of the returned value depends on the parser. If the parser fails to match,
|
||||
// it returns `Err(input)`.
|
||||
type ParseResult<'a, Output> = Result<(&'a str, Output), &'a str>;
|
||||
|
||||
/// A parser or parser-combinator.
|
||||
///
|
||||
/// Parser-combinators compose multiple parsers together to parse input.
|
||||
/// For example, two basic parsers (`&'static str`s) may be combined with
|
||||
/// a parser-combinator like [or] to produce a new parser.
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{or, Parser};
|
||||
/// let foo = "foo"; // matches "foo" literally
|
||||
/// let bar = "bar"; // matches "bar" literally
|
||||
/// let foo_or_bar = or(foo, bar); // matches either "foo" or "bar"
|
||||
/// assert_eq!(Ok(("", "foo")), foo_or_bar.parse("foo"));
|
||||
/// assert_eq!(Ok(("", "bar")), foo_or_bar.parse("bar"));
|
||||
/// assert_eq!(Err("baz"), foo_or_bar.parse("baz"));
|
||||
/// ```
|
||||
pub trait Parser<'a> {
|
||||
type Output;
|
||||
|
||||
fn parse(&self, input: &'a str) -> ParseResult<'a, Self::Output>;
|
||||
}
|
||||
|
||||
// Most parser-combinators are written as higher-order functions which take some
|
||||
// parser(s) as input and return a new parser: a function that takes input and returns
|
||||
// a parse result. The underlying implementation of [Parser::parse] for these functions
|
||||
// is simply application.
|
||||
#[doc(hidden)]
|
||||
impl<'a, F, T> Parser<'a> for F
|
||||
where
|
||||
F: Fn(&'a str) -> ParseResult<T>,
|
||||
{
|
||||
type Output = T;
|
||||
|
||||
fn parse(&self, input: &'a str) -> ParseResult<'a, Self::Output> {
|
||||
self(input)
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser which matches the string literal exactly.
|
||||
///
|
||||
/// This parser succeeds if the next characters in the input are equal to the given
|
||||
/// string literal.
|
||||
///
|
||||
/// Note that [str::parse] interferes with calling [Parser::parse] on string literals
|
||||
/// directly; this trait implementation works when used within any parser combinator
|
||||
/// but does not work on its own. To call [Parser::parse] on a parser for a string
|
||||
/// literal, use the [token] parser.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{or, Parser};
|
||||
/// let parser = or("foo", "bar");
|
||||
/// assert_eq!(Ok(("", "foo")), parser.parse("foo"));
|
||||
/// assert_eq!(Ok(("", "bar")), parser.parse("bar"));
|
||||
/// assert_eq!(Err("baz"), parser.parse("baz"));
|
||||
/// ```
|
||||
impl<'a> Parser<'a> for &'static str {
|
||||
type Output = &'a str;
|
||||
|
||||
fn parse(&self, input: &'a str) -> ParseResult<'a, Self::Output> {
|
||||
match input.get(0..self.len()) {
|
||||
Some(actual) if actual == *self => Ok((&input[self.len()..], &input[0..self.len()])),
|
||||
_ => Err(input),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parsers
|
||||
|
||||
/// A parser which matches the given string literally.
|
||||
///
|
||||
/// This function is a convenience for interpreting string literals as parsers
|
||||
/// and is only necessary to avoid conflict with [str::parse]. See the documentation
|
||||
/// for the `&'static str` implementation of [Parser].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{token, Parser};
|
||||
/// let parser = token("foo");
|
||||
/// assert_eq!(Ok(("", "foo")), parser.parse("foo"));
|
||||
/// assert_eq!(Err("bar"), parser.parse("bar"));
|
||||
/// ```
|
||||
pub fn token<'a>(literal: &'static str) -> impl Parser<'a, Output = &'a str> {
|
||||
literal
|
||||
}
|
||||
|
||||
/// A parser which matches all values until the specified pattern is found.
|
||||
///
|
||||
/// If the pattern is not found, this parser does not match. The input up to the
|
||||
/// character for which the pattern returns `true` is returned, but not that character itself.
|
||||
///
|
||||
/// If the pattern function returns true on the first input character, this
|
||||
/// parser fails.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{take_until, Parser};
|
||||
/// let parser = take_until(|c| c == '.');
|
||||
/// assert_eq!(Ok((".bar", "foo")), parser.parse("foo.bar"));
|
||||
/// assert_eq!(Err(".foo"), parser.parse(".foo"));
|
||||
/// assert_eq!(Err("foo"), parser.parse("foo"));
|
||||
/// ```
|
||||
pub fn take_until<'a, F>(pattern: F) -> impl Parser<'a, Output = &'a str>
|
||||
where
|
||||
F: Fn(char) -> bool,
|
||||
{
|
||||
move |input: &'a str| match input.find(&pattern) {
|
||||
Some(index) if index != 0 => Ok((&input[index..], &input[0..index])),
|
||||
_ => Err(input),
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser which matches all values until the specified pattern no longer matches.
|
||||
///
|
||||
/// This parser only ever fails if the input has a length of zero.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{take_while, Parser};
|
||||
/// let parser = take_while(|c| c == '1');
|
||||
/// assert_eq!(Ok(("2", "11")), parser.parse("112"));
|
||||
/// assert_eq!(Err("22"), parser.parse("22"));
|
||||
/// ```
|
||||
pub fn take_while<'a, F>(pattern: F) -> impl Parser<'a, Output = &'a str>
|
||||
where
|
||||
F: Fn(char) -> bool,
|
||||
{
|
||||
move |input: &'a str| match input
|
||||
.char_indices()
|
||||
.take_while(|(_p, c)| pattern(*c))
|
||||
.last()
|
||||
{
|
||||
Some((index, c)) => {
|
||||
let index = index + c.len_utf8();
|
||||
Ok((&input[index..], &input[0..index]))
|
||||
}
|
||||
_ => Err(input),
|
||||
}
|
||||
}
|
||||
|
||||
// Variadic parser combinators
|
||||
|
||||
/// A parser combinator which matches a sequence of parsers in an all-or-nothing fashion.
|
||||
///
|
||||
/// The returned value is a tuple containing the outputs of all parsers in order. Each
|
||||
/// parser in the sequence may be typed differently.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{seq, Parser};
|
||||
/// let parser = seq!("<", "a", ">");
|
||||
/// assert_eq!(Ok(("", ("<", "a", ">"))), parser.parse("<a>"));
|
||||
/// assert_eq!(Err("<b>"), parser.parse("<b>"));
|
||||
/// ```
|
||||
#[macro_export]
|
||||
macro_rules! seq {
|
||||
($($parsers: expr),+ $(,)?) => {
|
||||
($($parsers),+)
|
||||
}
|
||||
}
|
||||
|
||||
// Seq is implemented using trait-implementations of Parser for various size tuples.
|
||||
// This allows sequences to be typed heterogeneously.
|
||||
macro_rules! seq_impl {
|
||||
($($parser:ident),+) => {
|
||||
#[allow(non_snake_case)]
|
||||
impl<'a, $($parser),+> Parser<'a> for ($($parser),+)
|
||||
where
|
||||
$($parser: Parser<'a>),+
|
||||
{
|
||||
type Output = ($($parser::Output),+);
|
||||
|
||||
fn parse(&self, input: &'a str) -> ParseResult<'a, Self::Output> {
|
||||
let ($($parser),+) = self;
|
||||
seq_body_impl!(input, input, $($parser),+ ; )
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! seq_body_impl {
|
||||
($input:expr, $next_input:expr, $head:ident, $($tail:ident),+ ; $(,)? $($acc:ident),*) => {
|
||||
match $head.parse($next_input) {
|
||||
Ok((next_input, $head)) => seq_body_impl!($input, next_input, $($tail),+ ; $($acc),*, $head),
|
||||
Err(_) => Err($input),
|
||||
}
|
||||
};
|
||||
($input:expr, $next_input:expr, $last:ident ; $(,)? $($acc:ident),*) => {
|
||||
match $last.parse($next_input) {
|
||||
Ok((next_input, last)) => Ok((next_input, ($($acc),+, last))),
|
||||
Err(_) => Err($input),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
seq_impl!(A, B);
|
||||
seq_impl!(A, B, C);
|
||||
seq_impl!(A, B, C, D);
|
||||
seq_impl!(A, B, C, D, E);
|
||||
seq_impl!(A, B, C, D, E, F);
|
||||
seq_impl!(A, B, C, D, E, F, G);
|
||||
seq_impl!(A, B, C, D, E, F, G, H);
|
||||
seq_impl!(A, B, C, D, E, F, G, H, I);
|
||||
seq_impl!(A, B, C, D, E, F, G, H, I, J);
|
||||
|
||||
/// A parser combinator which chooses the first of the input parsers which matches
|
||||
/// successfully.
|
||||
///
|
||||
/// All input parsers must have the same output type. This is a variadic form for [or].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{choice, or, Parser};
|
||||
/// let parser = choice!("foo", "bar", "baz");
|
||||
/// assert_eq!(Ok(("", "foo")), parser.parse("foo"));
|
||||
/// assert_eq!(Ok(("", "bar")), parser.parse("bar"));
|
||||
/// assert_eq!(Err("quiz"), parser.parse("quiz"));
|
||||
/// ```
|
||||
#[macro_export]
|
||||
macro_rules! choice {
|
||||
($parser: expr $(,)?) => {
|
||||
$parser
|
||||
};
|
||||
($parser: expr, $($rest: expr),+ $(,)?) => {
|
||||
or($parser, choice!($($rest),+))
|
||||
}
|
||||
}
|
||||
|
||||
// Ordinary parser combinators
|
||||
|
||||
/// A parser combinator which takes a parser as input and maps the output using the
|
||||
/// given transformation function.
|
||||
///
|
||||
/// This corresponds to [Result::map]. The value is only mapped if the input parser
|
||||
/// matches against input.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{map, Parser};
|
||||
/// let parser = map("123", |s| s.parse::<i32>().unwrap());
|
||||
/// assert_eq!(Ok(("", 123)), parser.parse("123"));
|
||||
/// assert_eq!(Err("abc"), parser.parse("abc"));
|
||||
/// ```
|
||||
pub fn map<'a, P, F, T>(parser: P, map_fn: F) -> impl Parser<'a, Output = T>
|
||||
where
|
||||
P: Parser<'a>,
|
||||
F: Fn(P::Output) -> T,
|
||||
{
|
||||
move |input| {
|
||||
parser
|
||||
.parse(input)
|
||||
.map(|(next_input, result)| (next_input, map_fn(result)))
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser combinator which succeeds if the given parser matches the input and
|
||||
/// the given `filter_map_fn` returns `Some`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{filter_map, take_until, Parser};
|
||||
/// let parser = filter_map(take_until(|c| c == '.'), |s| s.parse::<i32>().ok());
|
||||
/// assert_eq!(Ok((".456", 123)), parser.parse("123.456"));
|
||||
/// assert_eq!(Err("abc.def"), parser.parse("abc.def"));
|
||||
/// ```
|
||||
pub fn filter_map<'a, P, F, T>(parser: P, filter_map_fn: F) -> impl Parser<'a, Output = T>
|
||||
where
|
||||
P: Parser<'a>,
|
||||
F: Fn(P::Output) -> Option<T>,
|
||||
{
|
||||
move |input| match parser.parse(input) {
|
||||
Ok((next_input, value)) => match filter_map_fn(value) {
|
||||
Some(value) => Ok((next_input, value)),
|
||||
None => Err(input),
|
||||
},
|
||||
Err(_) => Err(input),
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser combinator which succeeds if the first given parser matches the input and
|
||||
/// the second given parser matches the output of the first.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{reparse_as, take_until, one_or_more, Parser};
|
||||
/// let parser = reparse_as(take_until(|c| c == '/'), one_or_more("a"));
|
||||
/// assert_eq!(Ok(("/bb", vec!["a", "a"])), parser.parse("aa/bb"));
|
||||
/// ```
|
||||
pub fn reparse_as<'a, P1, P2, T>(parser1: P1, parser2: P2) -> impl Parser<'a, Output = T>
|
||||
where
|
||||
P1: Parser<'a, Output = &'a str>,
|
||||
P2: Parser<'a, Output = T>,
|
||||
{
|
||||
filter_map(parser1, move |str| {
|
||||
parser2.parse(str).map(|(_, value)| value).ok()
|
||||
})
|
||||
}
|
||||
|
||||
/// A parser combinator which only matches the input when the predicate function
|
||||
/// returns true.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{filter, take_until, Parser};
|
||||
/// let parser = filter(take_until(|c| c == '.'), |s| s == &"123");
|
||||
/// assert_eq!(Ok((".456", "123")), parser.parse("123.456"));
|
||||
/// assert_eq!(Err("456.123"), parser.parse("456.123"));
|
||||
/// ```
|
||||
pub fn filter<'a, P, F, T>(parser: P, pred_fn: F) -> impl Parser<'a, Output = T>
|
||||
where
|
||||
P: Parser<'a, Output = T>,
|
||||
F: Fn(&P::Output) -> bool,
|
||||
{
|
||||
move |input| {
|
||||
if let Ok((next_input, value)) = parser.parse(input) {
|
||||
if pred_fn(&value) {
|
||||
return Ok((next_input, value));
|
||||
}
|
||||
}
|
||||
Err(input)
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser combinator which matches either of the input parsers.
|
||||
///
|
||||
/// Both parsers must have the same output type. For a variadic form which
|
||||
/// can take any number of parsers, use `choice!`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{or, Parser};
|
||||
/// let parser = or("foo", "bar");
|
||||
/// assert_eq!(Ok(("", "foo")), parser.parse("foo"));
|
||||
/// assert_eq!(Ok(("", "bar")), parser.parse("bar"));
|
||||
/// assert_eq!(Err("baz"), parser.parse("baz"));
|
||||
/// ```
|
||||
pub fn or<'a, P1, P2, T>(parser1: P1, parser2: P2) -> impl Parser<'a, Output = T>
|
||||
where
|
||||
P1: Parser<'a, Output = T>,
|
||||
P2: Parser<'a, Output = T>,
|
||||
{
|
||||
move |input| match parser1.parse(input) {
|
||||
ok @ Ok(_) => ok,
|
||||
Err(_) => parser2.parse(input),
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser combinator which attempts to match the given parser, returning a
|
||||
/// `None` output value if the parser does not match.
|
||||
///
|
||||
/// The parser produced with this combinator always succeeds. If the given parser
|
||||
/// succeeds, `Some(value)` is returned where `value` is the output of the given
|
||||
/// parser. Otherwise, `None`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{optional, Parser};
|
||||
/// let parser = optional("foo");
|
||||
/// assert_eq!(Ok(("bar", Some("foo"))), parser.parse("foobar"));
|
||||
/// assert_eq!(Ok(("bar", None)), parser.parse("bar"));
|
||||
/// ```
|
||||
pub fn optional<'a, P, T>(parser: P) -> impl Parser<'a, Output = Option<T>>
|
||||
where
|
||||
P: Parser<'a, Output = T>,
|
||||
{
|
||||
move |input| match parser.parse(input) {
|
||||
Ok((next_input, value)) => Ok((next_input, Some(value))),
|
||||
Err(_) => Ok((input, None)),
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser combinator which runs the given parsers in sequence and returns the
|
||||
/// value of `left` if both are matched.
|
||||
///
|
||||
/// This is useful for two-element sequences in which you only want the output
|
||||
/// value of the `left` parser.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{left, Parser};
|
||||
/// let parser = left("foo", "bar");
|
||||
/// assert_eq!(Ok(("", "foo")), parser.parse("foobar"));
|
||||
/// ```
|
||||
pub fn left<'a, L, R, T>(left: L, right: R) -> impl Parser<'a, Output = T>
|
||||
where
|
||||
L: Parser<'a, Output = T>,
|
||||
R: Parser<'a>,
|
||||
{
|
||||
map(seq!(left, right), |(left_value, _)| left_value)
|
||||
}
|
||||
|
||||
/// A parser combinator which runs the given parsers in sequence and returns the
|
||||
/// value of `right` if both are matched.
|
||||
///
|
||||
/// This is useful for two-element sequences in which you only want the output
|
||||
/// value of the `right` parser.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{right, Parser};
|
||||
/// let parser = right("foo", "bar");
|
||||
/// assert_eq!(Ok(("", "bar")), parser.parse("foobar"));
|
||||
/// ```
|
||||
pub fn right<'a, L, R, T>(left: L, right: R) -> impl Parser<'a, Output = T>
|
||||
where
|
||||
L: Parser<'a>,
|
||||
R: Parser<'a, Output = T>,
|
||||
{
|
||||
map(seq!(left, right), |(_, right_value)| right_value)
|
||||
}
|
||||
|
||||
/// A parser combinator which matches the given parser against the input zero or
|
||||
/// more times.
|
||||
///
|
||||
/// This parser always succeeds and returns the empty Vec when it matched zero
|
||||
/// times.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{zero_or_more, Parser};
|
||||
/// let parser = zero_or_more("a");
|
||||
/// assert_eq!(Ok(("", vec![])), parser.parse(""));
|
||||
/// assert_eq!(Ok(("", vec!["a"])), parser.parse("a"));
|
||||
/// assert_eq!(Ok(("", vec!["a", "a"])), parser.parse("aa"));
|
||||
/// assert_eq!(Ok(("bb", vec![])), parser.parse("bb"));
|
||||
/// ```
|
||||
pub fn zero_or_more<'a, P, T>(parser: P) -> impl Parser<'a, Output = Vec<T>>
|
||||
where
|
||||
P: Parser<'a, Output = T>,
|
||||
{
|
||||
let parser = non_empty(parser);
|
||||
move |mut input| {
|
||||
let mut values = Vec::new();
|
||||
|
||||
while let Ok((next_input, value)) = parser.parse(input) {
|
||||
input = next_input;
|
||||
values.push(value);
|
||||
}
|
||||
|
||||
Ok((input, values))
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser combinator which matches the given parser against the input one or
|
||||
/// more times.
|
||||
///
|
||||
/// This parser combinator acts the same as [zero_or_more] but must match at
|
||||
/// least once.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use helix_parsec::{one_or_more, Parser};
|
||||
/// let parser = one_or_more("a");
|
||||
/// assert_eq!(Err(""), parser.parse(""));
|
||||
/// assert_eq!(Ok(("", vec!["a"])), parser.parse("a"));
|
||||
/// assert_eq!(Ok(("", vec!["a", "a"])), parser.parse("aa"));
|
||||
/// assert_eq!(Err("bb"), parser.parse("bb"));
|
||||
/// ```
|
||||
pub fn one_or_more<'a, P, T>(parser: P) -> impl Parser<'a, Output = Vec<T>>
|
||||
where
|
||||
P: Parser<'a, Output = T>,
|
||||
{
|
||||
let parser = non_empty(parser);
|
||||
move |mut input| {
|
||||
let mut values = Vec::new();
|
||||
|
||||
match parser.parse(input) {
|
||||
Ok((next_input, value)) => {
|
||||
input = next_input;
|
||||
values.push(value);
|
||||
}
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
|
||||
while let Ok((next_input, value)) = parser.parse(input) {
|
||||
input = next_input;
|
||||
values.push(value);
|
||||
}
|
||||
|
||||
Ok((input, values))
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser combinator which matches one or more instances of the given parser
|
||||
/// interspersed with the separator parser.
|
||||
///
|
||||
/// Output values of the separator parser are discarded.
|
||||
///
|
||||
/// This is typically used to parse function arguments or list items.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use helix_parsec::{sep, Parser};
|
||||
/// let parser = sep("a", ",");
|
||||
/// assert_eq!(Ok(("", vec!["a", "a", "a"])), parser.parse("a,a,a"));
|
||||
/// ```
|
||||
pub fn sep<'a, P, S, T>(parser: P, separator: S) -> impl Parser<'a, Output = Vec<T>>
|
||||
where
|
||||
P: Parser<'a, Output = T>,
|
||||
S: Parser<'a>,
|
||||
{
|
||||
move |mut input| {
|
||||
let mut values = Vec::new();
|
||||
|
||||
match parser.parse(input) {
|
||||
Ok((next_input, value)) => {
|
||||
input = next_input;
|
||||
values.push(value);
|
||||
}
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
|
||||
loop {
|
||||
match separator.parse(input) {
|
||||
Ok((next_input, _)) => input = next_input,
|
||||
Err(_) => break,
|
||||
}
|
||||
|
||||
match parser.parse(input) {
|
||||
Ok((next_input, value)) => {
|
||||
input = next_input;
|
||||
values.push(value);
|
||||
}
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
|
||||
Ok((input, values))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn non_empty<'a, T>(p: impl Parser<'a, Output = T>) -> impl Parser<'a, Output = T> {
|
||||
move |input| {
|
||||
let (new_input, res) = p.parse(input)?;
|
||||
if new_input.len() == input.len() {
|
||||
Err(input)
|
||||
} else {
|
||||
Ok((new_input, res))
|
||||
}
|
||||
}
|
||||
}
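Beyond the per-combinator doctests above, here is a short sketch of how the pieces might compose into a slightly larger parser; the key=value grammar is invented purely for illustration:

```rust
use helix_parsec::{filter_map, map, sep, seq, take_while, Parser};

// Parse identifiers such as "foo" and decimal integers such as "42".
let ident = take_while(|c: char| c.is_ascii_alphabetic());
let number = filter_map(take_while(|c: char| c.is_ascii_digit()), |s| {
    s.parse::<u32>().ok()
});

// "key=value" becomes an (identifier, number) pair; the "=" output is discarded.
let pair = map(seq!(ident, "=", number), |(key, _, value)| (key, value));

// A comma-separated list of pairs.
let pairs = sep(pair, ",");
assert_eq!(
    Ok(("", vec![("a", 1), ("bb", 22)])),
    pairs.parse("a=1,bb=22")
);
```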
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,485 @@
|
||||
use std::cmp::min;
|
||||
|
||||
use helix_core::doc_formatter::{DocumentFormatter, GraphemeSource, TextFormat};
|
||||
use helix_core::graphemes::Grapheme;
|
||||
use helix_core::str_utils::char_to_byte_idx;
|
||||
use helix_core::syntax::Highlight;
|
||||
use helix_core::syntax::HighlightEvent;
|
||||
use helix_core::text_annotations::TextAnnotations;
|
||||
use helix_core::{visual_offset_from_block, Position, RopeSlice};
|
||||
use helix_view::editor::{WhitespaceConfig, WhitespaceRenderValue};
|
||||
use helix_view::graphics::Rect;
|
||||
use helix_view::theme::Style;
|
||||
use helix_view::view::ViewPosition;
|
||||
use helix_view::Document;
|
||||
use helix_view::Theme;
|
||||
use tui::buffer::Buffer as Surface;
|
||||
|
||||
pub trait LineDecoration {
|
||||
fn render_background(&mut self, _renderer: &mut TextRenderer, _pos: LinePos) {}
|
||||
fn render_foreground(
|
||||
&mut self,
|
||||
_renderer: &mut TextRenderer,
|
||||
_pos: LinePos,
|
||||
_end_char_idx: usize,
|
||||
) {
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: FnMut(&mut TextRenderer, LinePos)> LineDecoration for F {
|
||||
fn render_background(&mut self, renderer: &mut TextRenderer, pos: LinePos) {
|
||||
self(renderer, pos)
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper around a HighlightIterator
|
||||
/// that merges the layered highlights to create the final text style
|
||||
/// and yields the active text style and the char_idx where the active
|
||||
/// style will have to be recomputed.
|
||||
struct StyleIter<'a, H: Iterator<Item = HighlightEvent>> {
|
||||
text_style: Style,
|
||||
active_highlights: Vec<Highlight>,
|
||||
highlight_iter: H,
|
||||
theme: &'a Theme,
|
||||
}
|
||||
|
||||
impl<H: Iterator<Item = HighlightEvent>> Iterator for StyleIter<'_, H> {
|
||||
type Item = (Style, usize);
|
||||
fn next(&mut self) -> Option<(Style, usize)> {
|
||||
while let Some(event) = self.highlight_iter.next() {
|
||||
match event {
|
||||
HighlightEvent::HighlightStart(highlights) => {
|
||||
self.active_highlights.push(highlights)
|
||||
}
|
||||
HighlightEvent::HighlightEnd => {
|
||||
self.active_highlights.pop();
|
||||
}
|
||||
HighlightEvent::Source { start, end } => {
|
||||
if start == end {
|
||||
continue;
|
||||
}
|
||||
let style = self
|
||||
.active_highlights
|
||||
.iter()
|
||||
.fold(self.text_style, |acc, span| {
|
||||
acc.patch(self.theme.highlight(span.0))
|
||||
});
|
||||
return Some((style, end));
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct LinePos {
|
||||
/// Indicates whether the given visual line
|
||||
/// is the first visual line of the given document line
|
||||
pub first_visual_line: bool,
|
||||
/// The line index of the document line that contains the given visual line
|
||||
pub doc_line: usize,
|
||||
/// Vertical offset from the top of the inner view area
|
||||
pub visual_line: u16,
|
||||
/// The first char index of this visual line.
|
||||
/// Note that if the visual line is entirely filled by
|
||||
/// a very long inline virtual text then this index will point
|
||||
/// at the next (non-virtual) char after this visual line
|
||||
pub start_char_idx: usize,
|
||||
}
|
||||
|
||||
pub type TranslatedPosition<'a> = (usize, Box<dyn FnMut(&mut TextRenderer, Position) + 'a>);
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn render_document(
|
||||
surface: &mut Surface,
|
||||
viewport: Rect,
|
||||
doc: &Document,
|
||||
offset: ViewPosition,
|
||||
doc_annotations: &TextAnnotations,
|
||||
highlight_iter: impl Iterator<Item = HighlightEvent>,
|
||||
theme: &Theme,
|
||||
line_decoration: &mut [Box<dyn LineDecoration + '_>],
|
||||
translated_positions: &mut [TranslatedPosition],
|
||||
) {
|
||||
let mut renderer = TextRenderer::new(surface, doc, theme, offset.horizontal_offset, viewport);
|
||||
render_text(
|
||||
&mut renderer,
|
||||
doc.text().slice(..),
|
||||
offset,
|
||||
&doc.text_format(viewport.width, Some(theme)),
|
||||
doc_annotations,
|
||||
highlight_iter,
|
||||
theme,
|
||||
line_decoration,
|
||||
translated_positions,
|
||||
)
|
||||
}
|
||||
|
||||
fn translate_positions(
|
||||
char_pos: usize,
|
||||
first_visible_char_idx: usize,
|
||||
translated_positions: &mut [TranslatedPosition],
|
||||
text_fmt: &TextFormat,
|
||||
renderer: &mut TextRenderer,
|
||||
pos: Position,
|
||||
) {
|
||||
// check if any positions translated on the fly (like cursor) have been reached
|
||||
for (char_idx, callback) in &mut *translated_positions {
|
||||
if *char_idx < char_pos && *char_idx >= first_visible_char_idx {
|
||||
// by replacing the char_idx with usize::MAX we ensure
|
||||
// that the same position is only translated once
|
||||
// text will never reach usize::MAX as rust memory allocations are limited
|
||||
// to isize::MAX
|
||||
*char_idx = usize::MAX;
|
||||
|
||||
if text_fmt.soft_wrap {
|
||||
callback(renderer, pos)
|
||||
} else if pos.col >= renderer.col_offset
|
||||
&& pos.col - renderer.col_offset < renderer.viewport.width as usize
|
||||
{
|
||||
callback(
|
||||
renderer,
|
||||
Position {
|
||||
row: pos.row,
|
||||
col: pos.col - renderer.col_offset,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn render_text<'t>(
|
||||
renderer: &mut TextRenderer,
|
||||
text: RopeSlice<'t>,
|
||||
offset: ViewPosition,
|
||||
text_fmt: &TextFormat,
|
||||
text_annotations: &TextAnnotations,
|
||||
highlight_iter: impl Iterator<Item = HighlightEvent>,
|
||||
theme: &Theme,
|
||||
line_decorations: &mut [Box<dyn LineDecoration + '_>],
|
||||
translated_positions: &mut [TranslatedPosition],
|
||||
) {
|
||||
let (
|
||||
Position {
|
||||
row: mut row_off, ..
|
||||
},
|
||||
mut char_pos,
|
||||
) = visual_offset_from_block(
|
||||
text,
|
||||
offset.anchor,
|
||||
offset.anchor,
|
||||
text_fmt,
|
||||
text_annotations,
|
||||
);
|
||||
row_off += offset.vertical_offset;
|
||||
assert_eq!(0, offset.vertical_offset);
|
||||
|
||||
let (mut formatter, mut first_visible_char_idx) =
|
||||
DocumentFormatter::new_at_prev_checkpoint(text, text_fmt, text_annotations, offset.anchor);
|
||||
let mut styles = StyleIter {
|
||||
text_style: renderer.text_style,
|
||||
active_highlights: Vec::with_capacity(64),
|
||||
highlight_iter,
|
||||
theme,
|
||||
};
|
||||
|
||||
let mut last_line_pos = LinePos {
|
||||
first_visual_line: false,
|
||||
doc_line: usize::MAX,
|
||||
visual_line: u16::MAX,
|
||||
start_char_idx: usize::MAX,
|
||||
};
|
||||
let mut is_in_indent_area = true;
|
||||
let mut last_line_indent_level = 0;
|
||||
let mut style_span = styles
|
||||
.next()
|
||||
.unwrap_or_else(|| (Style::default(), usize::MAX));
|
||||
|
||||
loop {
|
||||
// formatter.line_pos() returns the line index of the next grapheme
|
||||
// so it must be called before formatter.next
|
||||
let doc_line = formatter.line_pos();
|
||||
let Some((grapheme, mut pos)) = formatter.next() else {
|
||||
let mut last_pos = formatter.visual_pos();
|
||||
if last_pos.row >= row_off {
|
||||
last_pos.col -= 1;
|
||||
last_pos.row -= row_off;
|
||||
// check if any positions translated on the fly (like cursor) are at the EOF
|
||||
translate_positions(
|
||||
char_pos + 1,
|
||||
first_visible_char_idx,
|
||||
translated_positions,
|
||||
text_fmt,
|
||||
renderer,
|
||||
last_pos,
|
||||
);
|
||||
}
|
||||
break;
|
||||
};
|
||||
|
||||
// skip any graphemes on visual lines before the block start
|
||||
if pos.row < row_off {
|
||||
if char_pos >= style_span.1 {
|
||||
style_span = if let Some(style_span) = styles.next() {
|
||||
style_span
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
char_pos += grapheme.doc_chars();
|
||||
first_visible_char_idx = char_pos + 1;
|
||||
continue;
|
||||
}
|
||||
pos.row -= row_off;
|
||||
|
||||
// if the end of the viewport is reached stop rendering
|
||||
if pos.row as u16 >= renderer.viewport.height {
|
||||
break;
|
||||
}
|
||||
|
||||
// apply decorations before rendering a new line
|
||||
if pos.row as u16 != last_line_pos.visual_line {
|
||||
if pos.row > 0 {
|
||||
renderer.draw_indent_guides(last_line_indent_level, last_line_pos.visual_line);
|
||||
is_in_indent_area = true;
|
||||
for line_decoration in &mut *line_decorations {
|
||||
line_decoration.render_foreground(renderer, last_line_pos, char_pos);
|
||||
}
|
||||
}
|
||||
last_line_pos = LinePos {
|
||||
first_visual_line: doc_line != last_line_pos.doc_line,
|
||||
doc_line,
|
||||
visual_line: pos.row as u16,
|
||||
start_char_idx: char_pos,
|
||||
};
|
||||
for line_decoration in &mut *line_decorations {
|
||||
line_decoration.render_background(renderer, last_line_pos);
|
||||
}
|
||||
}
|
||||
|
||||
// acquire the correct grapheme style
|
||||
if char_pos >= style_span.1 {
|
||||
style_span = styles.next().unwrap_or((Style::default(), usize::MAX));
|
||||
}
|
||||
char_pos += grapheme.doc_chars();
|
||||
|
||||
// check if any positions translated on the fly (like cursor) have been reached
|
||||
translate_positions(
|
||||
char_pos,
|
||||
first_visible_char_idx,
|
||||
translated_positions,
|
||||
text_fmt,
|
||||
renderer,
|
||||
pos,
|
||||
);
|
||||
|
||||
let grapheme_style = if let GraphemeSource::VirtualText { highlight } = grapheme.source {
|
||||
let style = renderer.text_style;
|
||||
if let Some(highlight) = highlight {
|
||||
style.patch(theme.highlight(highlight.0))
|
||||
} else {
|
||||
style
|
||||
}
|
||||
} else {
|
||||
style_span.0
|
||||
};
|
||||
|
||||
let virt = grapheme.is_virtual();
|
||||
renderer.draw_grapheme(
|
||||
grapheme.grapheme,
|
||||
grapheme_style,
|
||||
virt,
|
||||
&mut last_line_indent_level,
|
||||
&mut is_in_indent_area,
|
||||
pos,
|
||||
);
|
||||
}
|
||||
|
||||
renderer.draw_indent_guides(last_line_indent_level, last_line_pos.visual_line);
|
||||
for line_decoration in &mut *line_decorations {
|
||||
line_decoration.render_foreground(renderer, last_line_pos, char_pos);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct TextRenderer<'a> {
|
||||
pub surface: &'a mut Surface,
|
||||
pub text_style: Style,
|
||||
pub whitespace_style: Style,
|
||||
pub indent_guide_char: String,
|
||||
pub indent_guide_style: Style,
|
||||
pub newline: String,
|
||||
pub nbsp: String,
|
||||
pub space: String,
|
||||
pub tab: String,
|
||||
pub virtual_tab: String,
|
||||
pub indent_width: u16,
|
||||
pub starting_indent: usize,
|
||||
pub draw_indent_guides: bool,
|
||||
pub col_offset: usize,
|
||||
pub viewport: Rect,
|
||||
}
|
||||
|
||||
impl<'a> TextRenderer<'a> {
|
||||
pub fn new(
|
||||
surface: &'a mut Surface,
|
||||
doc: &Document,
|
||||
theme: &Theme,
|
||||
col_offset: usize,
|
||||
viewport: Rect,
|
||||
) -> TextRenderer<'a> {
|
||||
let editor_config = doc.config.load();
|
||||
let WhitespaceConfig {
|
||||
render: ws_render,
|
||||
characters: ws_chars,
|
||||
} = &editor_config.whitespace;
|
||||
|
||||
let tab_width = doc.tab_width();
|
||||
let tab = if ws_render.tab() == WhitespaceRenderValue::All {
|
||||
std::iter::once(ws_chars.tab)
|
||||
.chain(std::iter::repeat(ws_chars.tabpad).take(tab_width - 1))
|
||||
.collect()
|
||||
} else {
|
||||
" ".repeat(tab_width)
|
||||
};
|
||||
let virtual_tab = " ".repeat(tab_width);
|
||||
let newline = if ws_render.newline() == WhitespaceRenderValue::All {
|
||||
ws_chars.newline.into()
|
||||
} else {
|
||||
" ".to_owned()
|
||||
};
|
||||
|
||||
let space = if ws_render.space() == WhitespaceRenderValue::All {
|
||||
ws_chars.space.into()
|
||||
} else {
|
||||
" ".to_owned()
|
||||
};
|
||||
let nbsp = if ws_render.nbsp() == WhitespaceRenderValue::All {
|
||||
ws_chars.nbsp.into()
|
||||
} else {
|
||||
" ".to_owned()
|
||||
};
|
||||
|
||||
let text_style = theme.get("ui.text");
|
||||
|
||||
let indent_width = doc.indent_style.indent_width(tab_width) as u16;
|
||||
|
||||
TextRenderer {
|
||||
surface,
|
||||
indent_guide_char: editor_config.indent_guides.character.into(),
|
||||
newline,
|
||||
nbsp,
|
||||
space,
|
||||
tab,
|
||||
virtual_tab,
|
||||
whitespace_style: theme.get("ui.virtual.whitespace"),
|
||||
indent_width,
|
||||
starting_indent: col_offset / indent_width as usize
|
||||
+ (col_offset % indent_width as usize != 0) as usize
|
||||
+ editor_config.indent_guides.skip_levels as usize,
|
||||
indent_guide_style: text_style.patch(
|
||||
theme
|
||||
.try_get("ui.virtual.indent-guide")
|
||||
.unwrap_or_else(|| theme.get("ui.virtual.whitespace")),
|
||||
),
|
||||
text_style,
|
||||
draw_indent_guides: editor_config.indent_guides.render,
|
||||
viewport,
|
||||
col_offset,
|
||||
}
|
||||
}
|
||||
|
||||
/// Draws a single `grapheme` at the current render position with a specified `style`.
|
||||
pub fn draw_grapheme(
|
||||
&mut self,
|
||||
grapheme: Grapheme,
|
||||
mut style: Style,
|
||||
is_virtual: bool,
|
||||
last_indent_level: &mut usize,
|
||||
is_in_indent_area: &mut bool,
|
||||
position: Position,
|
||||
) {
|
||||
let cut_off_start = self.col_offset.saturating_sub(position.col);
|
||||
let is_whitespace = grapheme.is_whitespace();
|
||||
|
||||
// TODO is it correct to apply the whitespace style to all unicode whitespace?
|
||||
if is_whitespace {
|
||||
style = style.patch(self.whitespace_style);
|
||||
}
|
||||
|
||||
let width = grapheme.width();
|
||||
let space = if is_virtual { " " } else { &self.space };
|
||||
let nbsp = if is_virtual { " " } else { &self.nbsp };
|
||||
let tab = if is_virtual {
|
||||
&self.virtual_tab
|
||||
} else {
|
||||
&self.tab
|
||||
};
|
||||
let grapheme = match grapheme {
|
||||
Grapheme::Tab { width } => {
|
||||
let grapheme_tab_width = char_to_byte_idx(tab, width);
|
||||
&tab[..grapheme_tab_width]
|
||||
}
|
||||
// TODO special rendering for other whitespaces?
|
||||
Grapheme::Other { ref g } if g == " " => space,
|
||||
Grapheme::Other { ref g } if g == "\u{00A0}" => nbsp,
|
||||
Grapheme::Other { ref g } => g,
|
||||
Grapheme::Newline => &self.newline,
|
||||
};
|
||||
|
||||
let in_bounds = self.col_offset <= position.col
|
||||
&& position.col < self.viewport.width as usize + self.col_offset;
|
||||
|
||||
if in_bounds {
|
||||
self.surface.set_string(
|
||||
self.viewport.x + (position.col - self.col_offset) as u16,
|
||||
self.viewport.y + position.row as u16,
|
||||
grapheme,
|
||||
style,
|
||||
);
|
||||
} else if cut_off_start != 0 && cut_off_start < width {
|
||||
// partially on screen
|
||||
let rect = Rect::new(
|
||||
self.viewport.x,
|
||||
self.viewport.y + position.row as u16,
|
||||
(width - cut_off_start) as u16,
|
||||
1,
|
||||
);
|
||||
self.surface.set_style(rect, style);
|
||||
}
|
||||
|
||||
if *is_in_indent_area && !is_whitespace {
|
||||
*last_indent_level = position.col;
|
||||
*is_in_indent_area = false;
|
||||
}
|
||||
}
|
||||
|
||||
/// Overlay indentation guides on top of a rendered line.
|
||||
/// The indentation level is computed in `draw_lines`.
|
||||
/// Therefore this function must always be called afterwards.
|
||||
pub fn draw_indent_guides(&mut self, indent_level: usize, row: u16) {
|
||||
if !self.draw_indent_guides {
|
||||
return;
|
||||
}
|
||||
|
||||
// Don't draw indent guides outside of view
|
||||
let end_indent = min(
|
||||
indent_level,
|
||||
// Add indent_width - 1 to round up, since the first visible
|
||||
// indent might be a bit after offset.col
|
||||
self.col_offset + self.viewport.width as usize + (self.indent_width as usize - 1),
|
||||
) / self.indent_width as usize;
|
||||
|
||||
for i in self.starting_indent..end_indent {
|
||||
let x = (self.viewport.x as usize + (i * self.indent_width as usize) - self.col_offset)
|
||||
as u16;
|
||||
let y = self.viewport.y + row;
|
||||
debug_assert!(self.surface.in_bounds(x, y));
|
||||
self.surface
|
||||
.set_string(x, y, &self.indent_guide_char, self.indent_guide_style);
|
||||
}
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff